diff --git a/spaces/1368565466ki/Satdia/app.py b/spaces/1368565466ki/Satdia/app.py
deleted file mode 100644
index 31cdc30680f88fe0a9a7e96575218eeeca606ad1..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/Satdia/app.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# coding=utf-8
-import os
-import re
-import argparse
-import utils
-import commons
-import json
-import torch
-import gradio as gr
-from models import SynthesizerTrn
-from text import text_to_sequence, _clean_text
-from torch import no_grad, LongTensor
-import gradio.processing_utils as gr_processing_utils
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
-
-hps_ms = utils.get_hparams_from_file(r'config/config.json')
-
-audio_postprocess_ori = gr.Audio.postprocess
-
-def audio_postprocess(self, y):
-    data = audio_postprocess_ori(self, y)
-    if data is None:
-        return None
-    return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
-
-
-gr.Audio.postprocess = audio_postprocess
-
-def get_text(text, hps, is_symbol):
-    text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
-    if hps.data.add_blank:
-        text_norm = commons.intersperse(text_norm, 0)
-    text_norm = LongTensor(text_norm)
-    return text_norm, clean_text
-
-def create_tts_fn(net_g_ms, speaker_id):
-    def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
-        text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
-        if limitation:
-            text_len = len(re.sub("\[([A-Z]{2})\]", "", text))
-            max_len = 100
-            if is_symbol:
-                max_len *= 3
-            if text_len > max_len:
-                return "Error: Text is too long", None
-        if not is_symbol:
-            if language == 0:
-                text = f"[ZH]{text}[ZH]"
-            elif language == 1:
-                text = f"[JA]{text}[JA]"
-            else:
-                text = f"{text}"
-        stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
-        with no_grad():
-            x_tst = stn_tst.unsqueeze(0).to(device)
-            x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
-            sid = LongTensor([speaker_id]).to(device)
-            audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
-                                   length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
-        return "Success", (22050, audio)
-    return tts_fn
-
-def create_to_symbol_fn(hps):
-    def to_symbol_fn(is_symbol_input, input_text, temp_lang):
-        if temp_lang == 0:
-            clean_text = f'[ZH]{input_text}[ZH]'
-        elif temp_lang == 1:
-            clean_text = f'[JA]{input_text}[JA]'
-        else:
-            clean_text = input_text
-        return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else ''
-
-    return to_symbol_fn
-def change_lang(language):
-    if language == 0:
-        return 0.6, 0.668, 1.2
-    elif language == 1:
-        return 0.6, 0.668, 1
-    else:
-        return 0.6, 0.668, 1
-
-download_audio_js = """
-() =>{{
-    let root = document.querySelector("body > gradio-app");
-    if (root.shadowRoot != null)
-        root = root.shadowRoot;
-    let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
-    let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
-    if (audio == undefined)
-        return;
-    text = text.value;
-    if (text == undefined)
-        text = Math.floor(Math.random()*100000000);
-    audio = audio.src;
-    let oA = document.createElement("a");
-    oA.download = text.substr(0, 20)+'.wav';
-    oA.href = audio;
-    document.body.appendChild(oA);
-    oA.click();
-    oA.remove();
-}}
-"""
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--api', action="store_true", default=False)
-    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
-    parser.add_argument("--all", action="store_true", default=False, help="enable all models")
-    args = parser.parse_args()
-    device = torch.device(args.device)
-    categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil"]
-    others = {
-        "Princess Connect! Re:Dive": "https://huggingface.co/spaces/sayashi/vits-models-pcr",
-        "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
-        "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
-        "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2",
-    }
-    if args.all:
-        categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil", "Princess Connect! Re:Dive", "Genshin Impact", "Honkai Impact 3rd", "Overwatch 2"]
-        others = {}
-    models = []
-    with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
-        models_info = json.load(f)
-    for i, info in models_info.items():
-        if info['title'].split("-")[0] not in categories or not info['enable']:
-            continue
-        sid = info['sid']
-        name_en = info['name_en']
-        name_zh = info['name_zh']
-        title = info['title']
-        cover = f"pretrained_models/{i}/{info['cover']}"
-        example = info['example']
-        language = info['language']
-        net_g_ms = SynthesizerTrn(
-            len(hps_ms.symbols),
-            hps_ms.data.filter_length // 2 + 1,
-            hps_ms.train.segment_size // hps_ms.data.hop_length,
-            n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
-            **hps_ms.model)
-        utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
-        _ = net_g_ms.eval().to(device)
-        models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
-    with gr.Blocks() as app:
-        gr.Markdown(
-            "# vits-models\n"
-            "## Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
-            "## 请不要生成会对个人以及组织造成侵害的内容\n\n"
-            "[](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n"
-            "[](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n"
-            "[](https://github.com/SayaSS/vits-finetuning)"
-        )
-
-        with gr.Tabs():
-            for category in categories:
-                with gr.TabItem(category):
-                    with gr.TabItem("EN"):
-                        for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
-                            if title.split("-")[0] != category:
-                                continue
-                            with gr.TabItem(name_en):
-                                with gr.Row():
-                                    gr.Markdown(
-                                        '
-                                        '''
-                                    )
-    app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
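Aside on the deleted app above: the same get_text → SynthesizerTrn.infer path that create_tts_fn wires into Gradio can be exercised directly. A minimal sketch, assuming it is run from the same module as the functions above (so utils, SynthesizerTrn and create_tts_fn are in scope) and that the config/config.json and pretrained_models/<id>/<id>.pth layout referenced in the file exists; the model id "1", speaker id 0, the sample text and the scipy-based WAV writing are illustrative assumptions, not part of the original Space.

import torch
from scipy.io import wavfile  # assumed available here, used only to save the result

device = torch.device("cpu")
hps = utils.get_hparams_from_file("config/config.json")
net_g = SynthesizerTrn(
    len(hps.symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    n_speakers=hps.data.n_speakers,
    **hps.model)
utils.load_checkpoint("pretrained_models/1/1.pth", net_g, None)  # placeholder model id
net_g.eval().to(device)

# create_tts_fn returns a closure; language=1 wraps the text in [JA] tags, as in the app.
tts_fn = create_tts_fn(net_g, speaker_id=0)  # placeholder speaker id
status, out = tts_fn("こんにちは", 1, 0.6, 0.668, 1.0, False)
if out is not None:
    sr, audio = out                       # (22050, float32 waveform)
    wavfile.write("sample.wav", sr, audio)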
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md
deleted file mode 100644
index 024bf4dd598cd5080f08b669fbe7ff910083e6ff..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md
+++ /dev/null
@@ -1,153 +0,0 @@
-
-
GameGeniePs3USBrar: How to Use Game Genie Save Editor for PS3
-
Do you want to unlock all levels, get maximum money, ammo and experience, and have more fun with your PS3 games? If yes, then you need GameGeniePs3USBrar. In this article, I will show you what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. Let's get started!
-
What is GameGeniePs3USBrar?
-
GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3. Game Genie Save Editor for PS3 is a software that allows you to access and edit your PS3 game saves on your PC with cheats that take effect once you load your game on your PS3. It is an easy-to-use program that works by copying your save from your PS3 to a USB drive, inserting it into your PC, choosing and applying cheats using Game Genie Save Editor for PS3, and copying your save back from the USB drive to your PS3.
A brief introduction to Game Genie Save Editor for PS3
-
Game Genie Save Editor for PS3 is a product developed by Hyperkin, a company that specializes in video game accessories and software. It was released in 2012 as a successor to the original Game Genie device that was popular in the 1990s. It works with European and American PS3 games, and does not require any illegal modifications or jailbreaking of your PS3. It is compatible with Windows XP, Vista, 7, 8, and 10.
-
The benefits of using Game Genie Save Editor for PS3
-
There are many benefits of using Game Genie Save Editor for PS3. Some of them are:
-
-
You can enjoy more freedom and creativity with your games by modifying them according to your preferences.
-
You can save time and effort by skipping difficult or tedious parts of the games.
-
You can enhance your gaming experience by unlocking new features, items, modes, characters, etc.
-
You can discover new secrets and easter eggs that you might have missed otherwise.
-
You can have more fun with your games by trying out different combinations of cheats.
-
-
How to download and install GameGeniePs3USBrar?
-
To use Game Genie Save Editor for PS3, you need to download and install GameGeniePs3USBrar on your PC. Here's how:
-
The requirements for using Game Genie Save Editor for PS3
-
Before you download and install GameGeniePs3USBrar, make sure you have the following requirements:
-
-
-
A PC with Windows XP, Vista, 7, 8, or 10.
-
A USB drive with at least 1 GB of free space.
-
A PS3 with a USB port.
-
A copy of Game Genie Save Editor for PS3. You can purchase it from www.thegamegenie.com or www.gamegenie.eu, depending on your region. You can also buy it as a physical product that comes with a USB drive or as a direct download version that you can download from the website after purchase.
-
-
The steps to download and install GameGeniePs3USBrar
-
Once you have the requirements ready, follow these steps to download and install GameGeniePs3USBrar:
Click on the link that says "Download Setup Here" under the appropriate section depending on whether you bought the physical product or the direct download version.
-
Save the file named "GameGeniePs3USBrar" or "GameGeniePs3EUrar" on your PC.
-
Extract the file using a program like WinRAR or 7-Zip.
-
Run the setup program named "GameGeniePs3Setup.exe" or "GameGeniePs3EUSetup.exe".
-
Follow the instructions on the screen to complete the installation process.
-
Launch the program by clicking on its icon on your desktop or start menu.
-
-
How to use GameGeniePs3USBrar to modify your PS3 saves?
-
Now that you have downloaded and installed GameGeniePs3USBrar on your PC, you can use it to modify your PS3 saves. Here's how:
-
The features of Game Genie Save Editor for PS3
-
Game Genie Save Editor for PS3 has several features that make it easy and convenient to use. Some of them are:
-
-
You can browse through hundreds of games and thousands of cheats that are available in its database.
-
You can search for games by name or by genre.
-
You can sort games by popularity or alphabetically.
-
You can view detailed information about each game and cheat, such as description, screenshots, video tutorials, etc.
-
You can customize each cheat by changing its value or enabling/disabling it.
-
You can create multiple profiles for different users or games.
-
You can backup and restore your saves in case something goes wrong.
-
You can update the program and its database automatically or manually.
-
-
The process of modifying your PS3 saves with Game Genie Save Editor for PS3
-
To modify your PS3 saves with Game Genie Save Editor for PS3, you need to follow three main steps: copying your save from your PS3 to a USB drive, choosing and applying cheats using Game Genie Save Editor for PS3 on your PC, and copying your save back from the USB drive to your PS3 and loading your game. Here's how:
-
How to copy your save from your PS3 to a USB drive
-
-
Turn on your PC and insert your USB drive into an available port.
-
Create a folder named "PS4" on the root directory of your USB drive.
-
Create another folder named "SAVEDATA" inside the "PS4" folder.
-
Create another folder named "BLESXXXXX" inside the "SAVEDATA" folder. Replace XXXXX with the five-digit code that corresponds to the region of your game. For example, if you have a European version of The Elder Scrolls V: Skyrim, the code would be BLES01329.
-
Copy your save file from your PS3 to the "BLESXXXXX" folder on your USB drive. To do this, go to the Game menu on your PS3, select Saved Data Utility (PS3), find the game you want to copy, press the Triangle button, and choose Copy. Select your USB device as the destination and confirm.
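If you would rather prepare the USB drive from a script than create the folders by hand, the layout described in the steps above can be built in a few lines. A minimal Python sketch (not part of the Game Genie software); the E:/ mount point and the BLES01329 game code are placeholders to adjust for your own system and game:

from pathlib import Path

usb_root = Path("E:/")        # placeholder: wherever the USB drive is mounted
game_code = "BLES01329"       # placeholder: the code for your game and region
save_dir = usb_root / "PS3" / "SAVEDATA" / game_code

# Creates PS3/SAVEDATA/<game code>; folders that already exist are left alone.
save_dir.mkdir(parents=True, exist_ok=True)
print("Copy your PS3 save folder into:", save_dir)

The console only cares that the save ends up under PS3/SAVEDATA/<game code> on the drive; how the folders are created does not matter.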
-
-
How to choose and apply cheats using Game Genie Save Editor for PS3
-
-
Insert your USB drive into your PC and launch Game Genie Save Editor for PS3.
-
Select the profile you want to use or create a new one by clicking on the Profile button.
-
Click on the Open button and browse to your USB drive. Select the save file you want to modify and click Open.
-
Wait for the program to load the game information and the available cheats. You can also click on the Refresh button to update the cheats database.
-
Browse through the cheats by clicking on the arrows or using the search box. You can also sort them by name or category.
-
Check the box next to each cheat you want to apply. You can also change the value of some cheats by clicking on them and typing a new number.
-
Click on the Apply button to confirm your changes. You can also click on the Backup button to save a copy of your original save file.
-
-
How to copy your save back from the USB drive to your PS3 and load your game
-
-
Eject your USB drive from your PC and insert it into your PS3.
-
Go to the Game menu on your PS3, select Saved Data Utility (PS3), find your USB device, press the Triangle button, and choose Copy.
-
Select the save file you want to copy and confirm. If you have a backup of your original save file, you can choose to overwrite it or keep both versions.
-
Load your game and enjoy your modified save!
-
-
What games and cheats are available with GameGeniePs3USBrar?
-
GameGeniePs3USBrar gives you access to hundreds of games and thousands of cheats that are available in its database. You can find games from various genres, such as action, adventure, role-playing, sports, racing, fighting, etc. You can also find cheats for different aspects of the games, such as health, money, ammo, items, stats, skills, levels, etc.
-
The list of games and cheats included in Game Genie Save Editor for PS3
-
To see the list of games and cheats included in Game Genie Save Editor for PS3, you can go to www.gamegenie.eu or www.thegamegenie.com, depending on your region. You can also view them in the program by clicking on the List button. The list is updated regularly with new games and cheats added every week. As of November 2016, there are 471 games and 23257 cheats in total.
-
The updates and support for Game Genie Save Editor for PS3
-
Game Genie Save Editor for PS3 is constantly updated with new games and cheats added every week. You can update the program and its database automatically or manually by clicking on the Update button. You can also check for updates by going to Help > Check for Updates. If you have any questions or problems with Game Genie Save Editor for PS3, you can contact the support team by going to Help > Contact Support or by sending an email to support@thegamegenie.com or support@gamegenie.eu.
-
Conclusion
-
In conclusion, GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3. Game Genie Save Editor for PS3 is a software that allows you to access and edit your PS3 game saves on your PC with cheats that take effect once you load your game on your PS3. It is an easy-to-use program that works by copying your save from your PS3 to a USB drive, inserting it into your PC, choosing and applying cheats using Game Genie Save Editor for PS3, and copying your save back from the USB drive to your PS3. It gives you access to hundreds of games and thousands of cheats that are available in its database. It is compatible with European and American PS3 games, and does not require any illegal modifications or jailbreaking of your PS3. It is a fun and convenient way to enhance your gaming experience with more freedom and creativity.
-
I hope this article has helped you understand what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
Frequently Asked Questions
-
Here are some frequently asked questions about GameGeniePs3USBrar:
-
-
Is Game Genie Save Editor for PS3 legal?
-Yes, Game Genie Save Editor for PS3 is legal as long as you use it for personal use only. It does not modify or hack your PS3 system or firmware. It only modifies your own game saves that are stored on a USB drive.
-
Does Game Genie Save Editor for PS3 work with all PS3 games?
-No, Game Genie Save Editor for PS3 does not work with all PS3 games. It only works with games that are supported by its database. You can check if a game is supported by going to www.gamegenie.eu or www.thegamegenie.com, depending on your region.
-
Can I use Game Genie Save Editor for PS3 online?
-No, you cannot use Game Genie Save Editor for PS3 online. It is intended for offline use only. Using it online may result in banning or suspension from online services or multiplayer modes.
-
Can I share my modified saves with other users?
-No, you cannot share your modified saves with other users. Each save file is encrypted with a unique code that is tied to your profile and console. Sharing it may cause corruption or errors.
-
Can I undo the changes made by Game Genie Save Editor for PS3?
-Yes, you can undo the changes made by Game Genie Save Editor for PS3 by restoring your original save file. To do this, you need to have a backup of your original save file that you created before applying any cheats. You can restore it by copying it back from the USB drive to your PS3 using the same method as before.
-
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md b/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md
deleted file mode 100644
index 0b6048d1166c727dcdab230f754034371080f047..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-
-Details: The most important version of AutoData for the Mac, AutoData 3.45, is a must-have backup solution for Mac users. You don't need a clean installation of OS X or a backup server to run it on your Mac.
-
-Learn how to download and install AutoData 3.45 free on your Mac with the step-by-step guides in this tutorial. It covers opening your Mac, installing AutoData, and downloading and using AutoData 3.45, and guides you through each step.
-
-1. How to download AutoData 3.45 free:
-
-Step 1. Click on the Download button to download the AutoData.dmg file.
-
-Step 2. Save it to your desktop by choosing “Save As” from the file browser.
-
-Step 3. Double click on the AutoData.dmg file to install AutoData 3.45.
-
-Note: The AutoData 3.45 Free Download may ask you to activate by entering the serial number, but you don't need to enter the serial number.
-
-Step 4. Choose “Upgrade from existing installation” if the version of the application you are currently running is not the same as the version you downloaded.
-
-Step 5. You are now ready to use AutoData 3.45 free.
-
-2. How to use AutoData 3.45 on your Mac:
-
-Step 1. Launch the AutoData application from the desktop.
-
-Step 2. Press “Backup”, “Restore”, “Make a backup of my data”, “Create a new backup”, “Delete” or “Revert”.
-
-Step 3. Press “Backup” to backup your applications.
-
-Step 4. Press “Restore” to restore your applications.
-
-Step 5. You can also use the Backup Manager to backup your applications.
-
-3. How to upgrade AutoData 3.45 Free?
-
-Step 2. You will see the following window:
-
-Note: If you don't see this window, then, download AutoData 3.45 and update it manually.
-
-Step 3. Choose “Upgrade from existing installation”.
-
-Step 4. You
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md b/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md
deleted file mode 100644
index a902dc18abdfd1c46bcc5629fca10766d968acf5..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Condenados a Fugarse — watch online. 1 Latin Spanish Netu; 2 Latin Spanish Fembed; 3 Latin Spanish MegaVIPS; 4 Latin Spanish Mystream; 5 Latin Spanish ...
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md
deleted file mode 100644
index e9fffb5bbde76b98df02c722b1984aec6c0af764..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-
How to Download GTA 5 on Android and iOS Devices
-
GTA 5 is one of the most popular and acclaimed video games of all time. It is an action-adventure game that lets you experience the life of a criminal in the fictional city of Los Santos. You can play as one of three protagonists, each with their own story, personality, and skills. You can also switch between them at any time, creating dynamic and immersive gameplay.
-
If you are a fan of GTA 5, you might be wondering if you can play it on your mobile devices, such as Android or iOS phones and tablets. The answer is yes, you can! In this article, we will show you how to download GTA 5 on Android and iOS devices, as well as some tips and tricks for playing it on the go.
-
GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar Games. It was released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. It is considered one of the best-selling and most critically acclaimed games of all time, with over 140 million copies sold worldwide.
-
GTA 5 Features and Gameplay
-
GTA 5 offers a vast and diverse open world that you can explore by foot, by car, by bike, by boat, by plane, or by helicopter. You can also interact with various characters, objects, and activities in the world, such as robbing stores, playing golf, racing cars, parachuting, hunting animals, or watching TV.
-
The game also features a compelling story mode that follows the lives of three protagonists: Michael, a retired bank robber who is unhappy with his family; Franklin, a young street hustler who wants to make it big; and Trevor, a psychotic drug dealer who lives in a trailer park. You can switch between them at any time during the game, creating different perspectives and outcomes.
-
Additionally, the game has an online mode called GTA Online, where you can create your own character and join other players in various missions, races, heists, deathmatches, or freemode events. You can also customize your character's appearance, skills, vehicles, weapons, properties, and businesses.
-
GTA 5 Requirements and Compatibility
-
To play GTA 5 on your mobile devices, you will need to have a compatible device that meets the minimum requirements. According to Rockstar Games, these are:
-
-
-| Device  | OS             | RAM            | Storage        |
-| ------- | -------------- | -------------- | -------------- |
-| Android | 8.0 or higher  | 4 GB or higher | 8 GB or higher |
-| iOS     | 12.0 or higher | 4 GB or higher | 8 GB or higher |
-
-
You will also need to have a stable internet connection to download and play the game.
-
How to Download GTA 5 on Android Devices
-
There are three main ways to download GTA 5 on your Android devices:
-
-
Download from the Official Rockstar Games Website
-
The easiest way to download GTA 5 on your Android devices is to visit the official Rockstar Games website and follow the instructions. You will need to create a Rockstar Games Social Club account or log in with your existing one. Then, you will need to purchase the game for $19.99 and download the GTA 5 apk file on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can install the apk file and launch the game.
-
Download from the Epic Games Store
-
Another way to download GTA 5 on your Android devices is to use the Epic Games Store app, which is available on the Google Play Store. You will need to create an Epic Games account or log in with your existing one. Then, you will need to purchase the game for $19.99 and download it on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can launch the game from the app.
-
Download from the BlueStacks App Player
-
The third way to download GTA 5 on your Android devices is to use the BlueStacks App Player, which is software that allows you to run Android apps on your PC. You will need to download and install the BlueStacks App Player on your PC from its official website. Then, you will need to download the GTA 5 apk file and data file from the Rockstar Games website or the Epic Games Store app. After that, you can transfer the files to your device using a USB cable or a cloud service. Then, you can install the apk file and launch the game.
-
How to Download GTA 5 on iOS Devices
-
There are two main ways to download GTA 5 on your iOS devices:
-
Download from the App Store
-
The easiest way to download GTA 5 on your iOS devices is to visit the App Store and search for GTA 5. You will need to have an Apple ID or create one if you don't have one. Then, you will need to purchase the game for $19.99 and download it on your device. You will also need to download the GTA 5 data file, which is about 3 GB in size. After that, you can launch the game from your home screen.
-
Download from the Cloud Gaming Services
-
Another way to download GTA 5 on your iOS devices is to use a cloud gaming service, such as Google Stadia, NVIDIA GeForce Now, or Microsoft xCloud. These are platforms that allow you to stream games from the cloud to your device without downloading them. You will need to have a subscription or a membership for these services, which vary in price and features. Then, you will need to have a compatible device and a stable internet connection. After that, you can access GTA 5 from the service's app or website and play it on your device.
-
Tips and Tricks for Playing GTA 5 on Mobile Devices
-
Playing GTA 5 on mobile devices can be challenging and fun at the same time. Here are some tips and tricks for playing it on the go:
-
Adjust the Settings and Controls
-
GTA 5 has a lot of settings and controls that you can customize according to your preference and device's performance. You can adjust the graphics quality, sound volume, camera angle, brightness, subtitles, and more. You can also change the control layout, sensitivity, vibration, and feedback. You can find these options in the pause menu under Settings.
-
Use the Online Mode and Social Club Features
-
GTA 5 has an online mode called GTA Online, where you can join other players in various missions, races, heists, deathmatches, or freemode events. You can also customize your character's appearance, skills, vehicles, weapons, properties, and businesses. To access GTA Online, you will need to have a Rockstar Games Social Club account and an internet connection. You can find this option in the pause menu under Online.
-
The Social Club also offers other features that enhance your gaming experience, such as leaderboards, stats, achievements, crews, friends, messages, screenshots, videos, and more. You can access these features from the pause menu under Social Club or from the Rockstar Games website or app.
-
Explore the Open World and Complete the Missions
-
GTA 5 has a vast and diverse open world that you can explore by foot, by car, by bike, by boat, by plane, or by helicopter. You can also interact with various characters, objects, and activities in the world, such as robbing stores, playing golf, racing cars, parachuting, hunting animals, or watching TV.
-
The game also has a compelling story mode that follows the lives of three protagonists: Michael, a retired bank robber who is unhappy with his family; Franklin, a young street hustler who wants to make it big; and Trevor, a psychotic drug dealer who lives in a trailer park. You can switch between them at any time during the game, creating different perspectives and outcomes.
-
To progress in the story mode, you will need to complete various missions that involve driving, shooting, stealth, planning, and teamwork. You can also choose how to approach each mission, such as being loud or quiet, aggressive or passive, or using different vehicles or weapons. You can find these missions on the map or by contacting the characters.
-
Conclusion
-
GTA 5 is an amazing game that you can enjoy on your mobile devices. You can download it from the official Rockstar Games website, the Epic Games Store app, the BlueStacks App Player, the App Store, or the cloud gaming services. You can also customize the settings and controls, use the online mode and social club features, and explore the open world and complete the missions. GTA 5 is a game that will keep you entertained for hours and hours.
-
FAQs
-
Here are some frequently asked questions about GTA 5 on mobile devices:
-
Q: How much space does GTA 5 take on my device?
-
A: GTA 5 takes about 8 GB of space on your device, plus another 3 GB for the data file. You will need to have enough free space on your device before downloading and installing the game.
-
Q: Can I play GTA 5 offline on my device?
-
A: Yes, you can play GTA 5 offline on your device. However, you will need to have an internet connection to download and install the game, as well as to access some features such as GTA Online and Social Club.
-
Q: Can I play GTA 5 with my friends on my device?
-
A: Yes, you can play GTA 5 with your friends on your device. You can join them in GTA Online or invite them to your game session. You will need to have a Rockstar Games Social Club account and an internet connection to do so.
-
Q: Can I transfer my GTA 5 progress from my PC or console to my device?
-
A: Yes, you can transfer your GTA 5 progress from your PC or console to your device. You will need to have a Rockstar Games Social Club account and link it to your PC or console account. Then, you will need to log in with the same account on your device and choose to sync your progress.
-
Q: Can I use cheats or mods on GTA 5 on my device?
-
A: No, you cannot use cheats or mods on GTA 5 on your device. Cheats and mods are not supported by Rockstar Games and may cause errors or bans on your account. You should only play GTA 5 on your device as intended by the developers.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/ .md b/spaces/1phancelerku/anime-remove-background/ .md
deleted file mode 100644
index 2165ff6319da142d979cd0413f348f93f58ffaa0..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/ .md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
Download Skanvord Fan: how to have fun and raise your level of knowledge
-
Do you like solving scanword puzzles but don't want to spend money on printed magazines or install a pile of different apps? Then you will enjoy the game Skanvord Fan, which offers an endless supply of free scanwords for every taste and difficulty level. In this article we explain what Skanvord Fan is, how to download it, how to solve scanwords in it, and what benefits it brings for your development and leisure.
Skanvord Fan is an app for Android and iOS that lets you solve scanwords on your phone or tablet. A scanword is a type of crossword in which the clues sit inside the grid and the answers are written horizontally or vertically. Scanwords can cover many topics, for example history, geography, culture, sport or science.
-
Skanvord Fan is not just a collection of scanwords but a complete game with many features and options for its users. Let's look at them in more detail.
-
Features of the Skanvord Fan game
-
Free scanwords every day
-
In Skanvord Fan you are not limited in how many scanwords you can solve. New scanwords of varying difficulty and topics become available every day. You can pick the ones that interest you and skip the ones you don't like, and you can return to skipped or unfinished scanwords at any time.
-
-
Three themes to choose from
-
In Skanvord Fan you can choose the theme of the scanwords you want to solve. There are three themes: general, sport and cinema. The general theme contains scanwords from all areas of knowledge, the sport theme covers different sports and famous athletes, and the cinema theme covers films and actors. You can switch themes at any moment or solve scanwords from all three.
-
A large number of settings
-
In Skanvord Fan you can adjust the game to your preferences. You can change the font size, background colour, interface language, sound and music. You can also turn automatic letter filling, error highlighting, hints and statistics on or off. You can save your progress and sync it with other devices through the cloud.
-
Bonuses for solving a group of scanwords
-
In Skanvord Fan you not only enjoy solving scanwords but also earn bonuses for your progress. For every solved scanword you receive coins, which can be spent on hints or mini-games. And if you solve a group of five scanwords of one theme, you get an extra bonus: a gold coin that unlocks a special scanword with a larger coin reward.
-
Mini-games for variety
-
Besides scanwords, Skanvord Fan offers several mini-games that let you stretch your brain and take a break. There are four mini-games: anagram, words-from-a-word, sudoku and the fifteen puzzle. In the anagram you build a word from the given letters; in words-from-a-word you find all the words hidden inside one long word; in sudoku you fill the grid with digits so that none repeats in a row, column or box; and in the fifteen puzzle you slide the tiles until they run in order from 1 to 15. Every mini-game also rewards you with coins that can be used in Skanvord Fan. The mini-games are available at any time and do not depend on the scanword theme.
-
How to download Skanvord Fan to your phone or tablet?
-
Downloading Skanvord Fan to your device is very simple. Depending on your operating system, the steps differ slightly.
-
For Android devices
-
If you have an Android phone or tablet, do the following:
-
-
Open the Google Play app on your device.
-
Type "сканворд фан" or "scanword fun" into the search bar.
-
Find the Skanvord Fan game among the search results and tap it.
-
Tap the "Install" button and wait for the game to download and install.
-
Tap "Open" or find the game icon on your home screen and launch it.
-
-
Congratulations, you have successfully downloaded and installed Skanvord Fan on your Android device!
-
For iOS devices
-
If you have an iPhone or iPad, do the following:
-
-
Open the App Store app on your device.
-
Type "сканворд фан" or "scanword fun" into the search bar.
-
Find the Skanvord Fan game among the search results and tap it.
-
Tap the "Get" button and wait for the game to download and install.
-
Tap "Open" or find the game icon on your home screen and launch it.
-
-
Congratulations, you have successfully downloaded and installed Skanvord Fan on your iOS device!
How to solve scanwords in Skanvord Fan?
-
Solving scanwords in Skanvord Fan is simple and engaging. You only need to follow a few steps:
-
Choose a difficulty level
-
In Skanvord Fan you can pick the difficulty level that suits you. There are three levels: easy, medium and hard. The easy level suits beginners or anyone who just wants to relax. The medium level suits those who want to think a little and test their knowledge. The hard level suits those who like difficult tasks and want to challenge themselves. You can change the level at any moment or solve scanwords of different levels.
-
Enter letters into the cells
-
In Skanvord Fan you enter letters into the grid cells with the keyboard or your finger. You can switch between horizontal and vertical input with the button in the lower-right corner of the screen, and move around the grid with the arrows or by swiping. If you enter a correct letter it stays in the cell; if not, it disappears.
-
Use the hints
-
In Skanvord Fan you can use hints whenever you get stuck on a clue or a word. There are three kinds of hints: reveal a letter, reveal a word, or reveal the whole scanword. "Reveal a letter" opens one letter in any word, "reveal a word" opens a whole word horizontally or vertically, and "reveal the scanword" opens every word in the puzzle. Hints are paid for with the coins you earn by solving scanwords or mini-games.
-
What benefits does Skanvord Fan give you?
-
Skanvord Fan does not just entertain you; it also benefits your development and leisure. Here are some of the benefits.
-
Develops erudition and quick thinking
-
Playing Skanvord Fan trains your erudition and the speed of your thinking. You learn many new and interesting facts on different topics, test your knowledge and memory, and work out words from their letters and definitions. This helps you expand your vocabulary, improve your spelling and grammar, and sharpen your concentration and logic.
-
Broadens your horizons and logical thinking
-
Playing Skanvord Fan broadens your horizons and logical thinking. You come across facts and events from history, geography, culture, sport, science and more. You also learn to analyse and compare information, draw conclusions and hypotheses, and spot connections and patterns. This makes you more knowledgeable and improves your problem-solving and decision-making skills.
-
Helps you relax and unwind
-
Playing Skanvord Fan helps you relax and unwind. You can play it at any time and in any place when you need to relieve stress or pass the time. You can enjoy the game's pleasant design, music and sounds, as well as the mini-games, and take satisfaction in your achievements, bonuses and rewards. Skanvord Fan is a great way to entertain yourself and lift your mood.
-
Conclusion
-
Skanvord Fan is a distinctive app for fans of scanwords and beyond. It offers an endless supply of free scanwords of varying difficulty and topics, along with many features for your development and leisure. You can download Skanvord Fan to your phone or tablet via the links below and start playing right now. Don't miss the chance to have fun and raise your level of knowledge with Skanvord Fan!
In this section we answer some frequently asked questions about Skanvord Fan.
-
Can I play Skanvord Fan without an internet connection?
-
Yes, you can. Skanvord Fan does not require a constant internet connection. You can play offline if you have already downloaded the scanwords or mini-games you need. You do, however, need to go online to sync your progress, receive new scanwords or access special offers.
-
How do I get more coins in Skanvord Fan?
-
There are several ways to get more coins in Skanvord Fan. First, you earn coins by solving scanwords and mini-games. Second, you receive bonuses for solving a group of scanwords of one theme or for solving the special scanword. Third, you can watch adverts or take part in promotions for extra coins. Fourth, you can buy coins for real money if you don't have enough for hints or mini-games.
-
How do I reset my progress in Skanvord Fan?
-
If you want to reset your progress in Skanvord Fan and start the game over, you can do so in the settings. To do this:
-
-
Open the game menu by tapping the three bars in the upper-left corner of the screen.
-
Select "Settings".
-
Scroll down to "Reset progress".
-
Tap the "Reset" button and confirm the action.
-
-
Note that resetting your progress deletes all your solved scanwords, coins, bonuses and settings, and they cannot be restored. Only reset your progress if you are sure about the decision.
-
How do I contact the developers of Skanvord Fan?
-
If you have questions, suggestions or problems with Skanvord Fan, you can contact the developers by e-mail or through social networks. Their contacts:
-
-
E-mail: scanwordfun@gmail.com
-
Facebook: https://www.facebook.com/scanwordfun
-
VK: https://vk.com/scanwordfun
-
Instagram: https://www.instagram.com/scanwordfun
-
-
The developers of Skanvord Fan will be glad to hear your feedback and to help you if needed.
-
How do I leave a review of Skanvord Fan?
-
If you like Skanvord Fan and want to share your impressions with other users, you can leave a review on Google Play or the App Store. To do this:
-
-
Open the Google Play or App Store app on your device.
-
Find Skanvord Fan among your installed apps and tap it.
-
Scroll down to the "Ratings and reviews" section.
-
Tap the "Write a review" or "Rate" button.
-
Choose how many stars you want to give the game and write your review in the text field.
-
Tap "Submit" or "Done".
-
-
Your review will be published on Google Play or the App Store and will be visible to other users. The developers of Skanvord Fan will also be glad to read it and take your opinion into account.
-
Thank you for choosing Skanvord Fan! We hope you enjoyed this article and found plenty of useful information. If you still have questions, write to us at scanwordfun@gmail.com or on social networks. Have a good game and good luck solving scanwords!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md b/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md
deleted file mode 100644
index 629a34ba6d8790be3ba87c55dd772174947d447d..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
National Treasure 2 Full Movie in Hindi Download 480p Filmyzilla
-
If you are a fan of action-adventure movies with historical mysteries and puzzles, you might have enjoyed watching National Treasure (2004) and its sequel National Treasure: Book of Secrets (2007). The movies star Nicolas Cage as Benjamin Franklin Gates, a treasure hunter who follows clues hidden in historical artifacts and documents to uncover secrets and conspiracies. In this article, we will review the second movie in the franchise, National Treasure: Book of Secrets, and tell you how you can download it in Hindi for free from Filmyzilla, a popular website that offers pirated movies.
-
Movie Review
-
National Treasure: Book of Secrets is a 2007 American action-adventure film directed by Jon Turteltaub and produced by Jerry Bruckheimer. It is a sequel to the 2004 film National Treasure and is the second film of the National Treasure franchise. The film stars Nicolas Cage in the lead role, Jon Voight, Harvey Keitel, Ed Harris, Diane Kruger, Justin Bartha, Bruce Greenwood and Helen Mirren.
-
The plot follows Ben Gates as he tries to prove the innocence of his great-great-grandfather Thomas Gates, who is accused of being involved in the assassination of Abraham Lincoln by a rival treasure hunter Mitch Wilkinson (Ed Harris). To do so, he has to find a lost city of gold that is linked to a secret book that contains the history of the United States. Along the way, he has to deal with his estranged parents (Jon Voight and Helen Mirren), his ex-girlfriend (Diane Kruger), his best friend (Justin Bartha), and an FBI agent (Harvey Keitel).
-
The movie is a fast-paced and fun-filled adventure that takes the viewers to various locations such as Paris, London, Washington D.C., Mount Rushmore, and the Black Hills. The movie has some impressive action sequences, such as a car chase in London, a kidnapping at Buckingham Palace, a break-in at the White House, and a cave exploration at Mount Rushmore. The movie also has some humorous moments, such as Ben's awkward interactions with his parents and his ex-girlfriend, Riley's sarcastic comments, and Ben's encounter with the President of the United States (Bruce Greenwood).
-
However, the movie also has some flaws that might affect its appeal to some viewers. The movie is very similar to its predecessor in terms of its plot structure, characters, and themes. The movie relies heavily on historical inaccuracies, coincidences, and conveniences to move the story forward. The movie also has some logical inconsistencies and plot holes that might raise some questions among the viewers. For example, how did Mitch Wilkinson get access to John Wilkes Booth's diary? How did Ben manage to sneak into Buckingham Palace and the White House? How did Ben know where to find the entrance to the lost city of gold?
-
-
The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of history, mystery, and adventure. The movie does not have any deep or profound messages or themes. It is simply a popcorn flick that delivers what it promises: action, humor, romance, and treasure.
-
Movie Trivia
-
Here are some interesting facts and behind-the-scenes stories about National Treasure: Book of Secrets that you might not know:
-
-
The movie was originally titled National Treasure 2: The Book of Secrets.
-
The movie was filmed in various locations such as France, England, South Dakota, Maryland, Virginia, Washington D.C., California, New York City.
-
The movie features several historical figures and events such as Abraham Lincoln, John Wilkes Booth, Mary Surratt, Samuel Mudd, Edwin Stanton, the Civil War, the Knights of the Golden Circle, the Resolute desks, the Statue of Liberty, the Mount Vernon Ladies' Association, and Cibola.
-
The movie also references several fictional works such as The Da Vinci Code, The Wizard of Oz, and The Adventures of Tom Sawyer.
-
The movie features several real-life artifacts and documents such as the Booth diary, the missing pages of the diary, the cipher wheel, the playbill, the twin Resolute desks, the Book of Secrets, and the President's seal.
-
The movie also features several fictional artifacts and documents such as the plank, the pipe, the scale model of Paris, the letter from Queen Victoria, the clue on the Statue of Liberty, and the map on the Resolute desk.
-
The movie had a budget of $130 million and grossed $457 million worldwide, making it a commercial success. It received mixed reviews from critics and audiences, with some praising its entertainment value and others criticizing its historical inaccuracies and implausibilities.
-
-
Filmyzilla Website
-
If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a popular website that offers free downloads of movies in various languages and formats. Filmyzilla is one of the many websites that provide pirated content to users who want to avoid paying for subscriptions or tickets. Filmyzilla has a large collection of movies from Hollywood, Bollywood, Tollywood, and other industries. You can find movies in genres such as action, comedy, drama, horror, thriller, romance, sci-fi, fantasy, animation, and more. You can also find movies in different resolutions such as 480p, 720p, 1080p, and 4K. You can download movies in formats such as MP4, MKV, AVI, and WMV.
-
Filmyzilla claims to provide high-quality and fast downloads of movies to its users. It also claims to update its library regularly with new releases and old classics. It has a user-friendly interface that allows you to search for movies by name, genre, year, or language. It also has a section for trending movies and a request option for users who want to request a specific movie.
-
However, before you visit Filmyzilla or any other similar website, you should be aware of some important facts and risks. First of all, downloading or streaming pirated content is illegal and unethical. It violates the intellectual property rights of the creators and distributors of the movies. It also harms the film industry by reducing its revenue and profits. By using Filmyzilla or any other pirated website, you are supporting piracy and contributing to its negative impact on the entertainment sector.
-
Secondly, using Filmyzilla or any other pirated website is unsafe and risky for your device and data. These websites often contain malware, viruses, spyware, adware, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further. These websites also require you to disable your antivirus or firewall software or allow unknown sources to access your device, which can expose you to more dangers.
-
Therefore, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream National Treasure: Book of Secrets in Hindi or any other movie. Instead, we recommend you to use legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube to watch National Treasure: Book of Secrets in Hindi or any other language. These platforms are legal and safe to use and they offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies. You might have to pay a subscription fee or a rental fee to use these platforms, but it is worth it for the quality and security they provide.
-
Conclusion
-
National Treasure: Book of Secrets is a movie that can be enjoyed by anyone who likes history, mystery, and adventure. It is a sequel to the 2004 movie National Treasure and it follows the same formula of clues, puzzles, and treasure hunting. The movie has some exciting action scenes, some funny moments, and some interesting historical references. The movie also has some flaws, such as its historical inaccuracies, its implausibilities, and its similarities to its predecessor. The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of the genre.
-
If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a website that offers free downloads of pirated movies. However, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream movies. These websites are illegal and unethical and they harm the film industry by violating the intellectual property rights of the creators and distributors of the movies. These websites are also unsafe and risky for your device and data as they contain malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.
-
Therefore, we recommend watching National Treasure: Book of Secrets in Hindi or any other language on legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube. A subscription or rental fee is a small price to pay for the quality, security, and respect for creators' rights that these services provide.
-
FAQs
-
Here are some frequently asked questions and answers about National Treasure: Book of Secrets and Filmyzilla:
-
-
Q: Is National Treasure: Book of Secrets based on a true story?
-A: No, National Treasure: Book of Secrets is not based on a true story. It is a fictional story that uses some historical figures and events as inspiration.
-
Q: Is there a third movie in the National Treasure franchise?
-A: Yes, a third movie in the National Treasure franchise is in development. It was announced in January 2020 that Chris Bremner was hired to write the script for National Treasure 3. However, there is no official release date or cast information yet.
-
Q: Is Filmyzilla legal?
-A: No, Filmyzilla is not legal. It is a website that offers free downloads of pirated movies that violate the intellectual property rights of the creators and distributors of the movies.
-
Q: Is Filmyzilla safe?
-A: No, Filmyzilla is not safe. It is a website that contains malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. It also displays annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.
-
Q: What are some alternatives to Filmyzilla?
-A: Some alternatives to Filmyzilla are Netflix, Amazon Prime Video, Disney Plus, or YouTube. These are legal and safe platforms that offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md b/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md
deleted file mode 100644
index 5563894eeed2020382b221d17c3ddb9a42e241b2..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
Dragon Ball Z Kakarot: How to Download and Play on Android
-
If you are a fan of the Dragon Ball anime and manga series, you might have heard of Dragon Ball Z Kakarot, a role-playing game that lets you relive the epic saga of Goku and his friends. In this game, you can explore the vast open world of Dragon Ball, fight against powerful enemies, and experience the story from different perspectives. But did you know that you can also play this game on your Android device? In this article, we will show you how to download and play Dragon Ball Z Kakarot on Android using APKPure, a reliable and safe source for Android games.
-
What is Dragon Ball Z Kakarot?
-
Dragon Ball Z Kakarot is a game developed by CyberConnect2 and published by Bandai Namco Entertainment in 2020. It is based on the Dragon Ball Z anime series, which follows the adventures of Goku, a Saiyan warrior who protects Earth from various threats. The game covers the four main sagas of the series: Saiyan Saga, Frieza Saga, Cell Saga, and Buu Saga. You can play as Goku and other characters, such as Vegeta, Gohan, Piccolo, and Trunks.
The game features a large open world that you can explore by flying, driving, fishing, eating, and more. You can also interact with various characters from the series, complete side quests, collect items, and upgrade your skills. The game also has a dynamic combat system that allows you to unleash powerful attacks and transformations. You can also use support characters to assist you in battle.
-
Why download Dragon Ball Z Kakarot from APKPure?
-
APKPure is a website that offers free and safe downloads of Android games and apps. You can find thousands of games from different genres and categories on APKPure, including popular titles like PUBG Mobile, Genshin Impact, Among Us, and more. You can also discover new and trending games that are not available on Google Play Store.
-
One of the advantages of using APKPure is that it provides fast and secure downloads. You don't need to worry about viruses or malware when downloading from APKPure. You also don't need to register or sign up to use APKPure. You can simply search for the game you want and download it with one click.
-
Another benefit of using APKPure is that it supports multiple languages and regions. You can choose the language and region that suits you best when browsing APKPure. You can also find games that are compatible with your device's specifications and preferences.
-
How to download and install Dragon Ball Z Kakarot from APKPure
-
To download and install Dragon Ball Z Kakarot from APKPure, you need to follow these simple steps:
Type "dragon ball z kakarot" in the search box and press enter.
-
Select the game from the list of results.
-
Click on the "Download" button and wait for the download to finish.
-
Once the download is complete, open the file manager app on your device and locate the downloaded file.
-
Tap on the file and allow installation from unknown sources if prompted.
-
Follow the instructions on the screen to install the game.
-
Launch the game and enjoy!
-
-
How to play Dragon Ball Z Kakarot on Android
-
To play Dragon Ball Z Kakarot on Android, you need to have a compatible device and a stable internet connection. The game requires at least 4 GB of RAM and 5 GB of free storage space. You also need to have Android 7.0 or higher as your operating system. The game may not run smoothly on low-end devices or devices with insufficient memory.
-
The game is easy to play with touch controls. You can move your character by using the virtual joystick on the left side of the screen. You can also use the buttons on the right side of the screen to perform actions such as attacking, dodging, charging, and using items. You can switch between characters by tapping on their icons on the top left corner of the screen. You can also access the menu by tapping on the three dots on the top right corner of the screen.
-
The game follows the story of Dragon Ball Z, so you can expect to encounter many familiar scenes and characters. You can also explore the world and find hidden secrets and collectibles. You can level up your characters by completing quests, fighting enemies, and training. You can also customize your characters by equipping skills, items, and costumes.
-
Gameplay tips and tricks for beginners and advanced players
-
Here are some gameplay tips and tricks that can help you enjoy Dragon Ball Z Kakarot more:
-
-
Use the map to find your objectives and waypoints. You can also use the map to fast travel to different locations.
-
Collect Z orbs and D medals as you explore. Z orbs are used to upgrade your skills, while D medals are used to unlock new skills.
-
Interact with NPCs and complete side quests. They can give you rewards such as items, money, and experience.
-
Use the community board to activate bonuses and perks. You can place different characters in different communities and increase their friendship levels.
-
Use the training grounds to learn new skills and techniques. You can also fight against past enemies and bosses to test your skills.
-
Use the cooking system to prepare meals that boost your stats and health. You can also eat at restaurants or campsites for temporary buffs.
-
Use the transformation system to gain an edge in battle. You can transform into different forms such as Super Saiyan, Super Saiyan 2, Super Saiyan 3, and more.
-
Use the support system to get help from your allies. You can call them to assist you in combat or switch with them if you are low on health.
-
Use the combo system to deal more damage and stun your enemies. You can chain different attacks and skills together for devastating effects.
-
Use the ki blast system to attack from a distance or break your enemy's guard. You can also charge your ki by holding down the attack button.
-
-
Conclusion
-
Dragon Ball Z Kakarot is a game that every Dragon Ball fan should try. It lets you experience the story of Dragon Ball Z in a new and immersive way. You can download and play this game on your Android device by using APKPure, a website that offers free and safe downloads of Android games and apps. APKPure has many advantages, such as fast and secure downloads, support for multiple languages and regions, and compatibility with various devices. To play Dragon Ball Z Kakarot on Android, you just need to follow the steps we mentioned above and enjoy the game.
-
If you have any questions or feedback about Dragon Ball Z Kakarot or APKPure, feel free to leave a comment below. We would love to hear from you!
-
Frequently Asked Questions
-
Here are some frequently asked questions about Dragon Ball Z Kakarot and APKPure:
-
-
Is Dragon Ball Z Kakarot free to play? No, Dragon Ball Z Kakarot is not a free-to-play game. It is a paid game that costs $59.99 on Steam and $39.99 on PlayStation 4 and Xbox One. However, you can download it for free from APKPure if you have an Android device.
-
Is Dragon Ball Z Kakarot online or offline? Dragon Ball Z Kakarot is mainly an offline game that does not require an internet connection to play. However, some features such as online events, leaderboards, achievements, and updates may require an internet connection.
-
Is Dragon Ball Z Kakarot multiplayer or single-player? Dragon Ball Z Kakarot is a single-player game that does not have a multiplayer mode. However, you can play with other players online in some events such as raids, boss battles, and tournaments.
-
Is APKPure safe to use? Yes, APKPure is safe to use as it does not contain any viruses or malware. APKPure also verifies the authenticity and integrity of the files it provides. You can trust APKPure to download and install Android games and apps without any worries.
-
How to update Dragon Ball Z Kakarot on Android? To update Dragon Ball Z Kakarot on Android, you need to visit the APKPure website again and check if there is a new version available. If there is, you can download and install it over the existing one. You can also enable the auto-update feature in the APKPure app to get the latest updates automatically.
- Using the llama-cpp-python package, we are excited to introduce a GGUF model hosted in a Hugging Face Docker Space, made accessible through an OpenAI-compatible API.
- This Space includes comprehensive API documentation to facilitate seamless integration.
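A minimal sketch of querying such an OpenAI-compatible endpoint with plain `requests`; the base URL and model name below are placeholders rather than this Space's actual values, so consult the Space's API documentation for the real ones.

```python
# Hypothetical client for an OpenAI-compatible endpoint served by llama-cpp-python.
# BASE_URL and the model name are placeholders, not the real Space values.
import requests

BASE_URL = "https://<user>-<space>.hf.space/v1"  # placeholder Space URL

response = requests.post(
    f"{BASE_URL}/chat/completions",
    json={
        "model": "gguf-model",  # placeholder; many servers accept a default model name
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
        "max_tokens": 64,
    },
    timeout=120,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```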
-
- If you find this resource valuable, your support in the form of starring
- the space would be greatly appreciated. Your engagement plays a vital role
- in furthering the application for a community GPU grant, ultimately
- enhancing the capabilities and accessibility of this space.
-
-
-
diff --git a/spaces/AIFILMS/image-to-sound-fx/app.py b/spaces/AIFILMS/image-to-sound-fx/app.py
deleted file mode 100644
index 9044639a5b247e132b3abebf90d61371ab89d806..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/image-to-sound-fx/app.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import gradio as gr
-import os
-import time
-from moviepy.editor import *
-from share_btn import community_icon_html, loading_icon_html, share_js
-
-#token = os.environ.get('HF_TOKEN')
-# Load two public Spaces as callable pipelines: CoCa for image captioning, AudioLDM for text-to-audio generation
-caption = gr.Blocks.load(name="spaces/laion/CoCa")
-audio_gen = gr.Blocks.load(name="spaces/haoheliu/audioldm-text-to-audio-generation")
-
-ph_message="If you're not happy with the sound result, you can manually describe the scene depicted in your image :)"
-
-def input_changes(input_img):
-
- if input_img is None:
- return manual_cap.update(value="",placeholder=ph_message), caption_output.update(value=None), sound_output.update(value=None)
- else:
- cap = caption(input_img, fn_index=0)
- print("CoCa caption: '" + cap + "' • ")
- ph_update = "CoCa caption: '" + cap + "' • "
-
- return manual_cap.update(value="",placeholder=f"{ph_update}{ph_message}"), caption_output.update(value=cap), sound_output.update(value=None)
-
-def infer(image_input, manual_caption, duration_in, seed, caption_output):
-
- print(duration_in)
- if manual_caption == "":
- cap = caption_output
- #cap = caption(image_input, fn_index=0)
- #print("CoCa caption: '" + cap + "' • ")
- #ph_update = "CoCa caption: '" + cap + "' • "
- else:
- cap = manual_caption
- print("manual caption: " + cap)
- ph_update=""
-
- sound = audio_gen(cap, duration_in, 2.5, seed, 3, fn_index=0)
-
- #return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True)
- return cap, sound[1], gr.Group.update(visible=True)
-
-title = """
-
-
-
- Image to Sound Effect
-
-
-
- Convert an image to a corresponding sound effect generated through CoCa Image Captioning & AudioLDM
-
-
-"""
-
-article = """
-
-
-
-
-
You may also like:
-
-
-
-
-
-
-
-
-
-"""
-
-with gr.Blocks(css="style.css") as demo:
- with gr.Column(elem_id="col-container"):
-
- gr.HTML(title)
-
- input_img = gr.Image(type="filepath", elem_id="input-img")
-
- with gr.Column():
- manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=3, placeholder=ph_message)
- with gr.Row():
- duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
- seed_in = gr.Slider(label="Seed", value=440, minimum=45, maximum=10000, step=1)
-
- caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
- sound_output = gr.Audio(label="Result", elem_id="sound-output")
-
- generate = gr.Button("Generate SFX from Image")
-
- with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
- community_icon = gr.HTML(community_icon_html)
- loading_icon = gr.HTML(loading_icon_html)
- share_button = gr.Button("Share to community", elem_id="share-btn")
-
- gr.HTML(article)
-
- change_out = [manual_cap, caption_output, sound_output]
- input_img.change(input_changes, input_img, change_out, queue=False)
-
-
-
- generate.click(infer, inputs=[input_img, manual_cap, duration_in, seed_in, caption_output], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
- share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=32).launch(debug=True)
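Since the click handler above registers `api_name="i2fx"`, the Space can in principle be called programmatically. The sketch below is hypothetical: it assumes the Space is reachable, that it runs a Gradio version compatible with `gradio_client`, and that the five inputs are passed positionally; file handling differs between client versions.

```python
# Hypothetical programmatic call to the image-to-sound-fx endpoint (api_name="i2fx").
from gradio_client import Client

client = Client("AIFILMS/image-to-sound-fx")  # Space name assumed from the file path above
result = client.predict(
    "example.jpg",   # input_img: local path or URL (newer clients may require handle_file())
    "",              # manual_cap: leave empty to fall back to the CoCa caption
    5,               # duration_in, in seconds
    440,             # seed_in
    "",              # caption_output (normally empty; filled by the image-change callback)
    api_name="/i2fx",
)
print(result)  # caption, generated audio, and the share-group update returned by infer()
```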
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py
deleted file mode 100644
index 96ccf3e709b62e0548572ea424bb03a1a67a4b2e..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .factory import list_models, create_model, create_model_and_transforms, add_model_config
-from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
-from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model
-from .openai import load_openai_model, list_openai_models
-from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\
- get_pretrained_url, download_pretrained
-from .tokenizer import SimpleTokenizer, tokenize
-from .transform import image_transform
diff --git a/spaces/AIGText/GlyphControl/ldm/modules/ema.py b/spaces/AIGText/GlyphControl/ldm/modules/ema.py
deleted file mode 100644
index d1488d699231e6712d09c0634854ac91d3a9b603..0000000000000000000000000000000000000000
--- a/spaces/AIGText/GlyphControl/ldm/modules/ema.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import torch
-from torch import nn
-
-
-class LitEma(nn.Module):
- def __init__(self, model, decay=0.9999, init_num_updates = 0, use_num_upates=True):
- super().__init__()
- if decay < 0.0 or decay > 1.0:
- raise ValueError('Decay must be between 0 and 1')
-
- self.m_name2s_name = {}
- self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
- self.register_buffer('num_updates', torch.tensor(init_num_updates, dtype=torch.int) if use_num_upates
- else torch.tensor(-1, dtype=torch.int)) # 0
-
- for name, p in model.named_parameters():
- if p.requires_grad:
- # remove as '.'-character is not allowed in buffers
- s_name = name.replace('.', '')
- self.m_name2s_name.update({name: s_name})
- self.register_buffer(s_name, p.clone().detach().data)
-
- self.collected_params = []
-
- def reset_num_updates(self):
- del self.num_updates
- self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
-
- def forward(self, model):
- decay = self.decay
-
- if self.num_updates >= 0:
- self.num_updates += 1
- decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
-
- one_minus_decay = 1.0 - decay
-
- with torch.no_grad():
- m_param = dict(model.named_parameters())
- shadow_params = dict(self.named_buffers())
-
- for key in m_param:
- if m_param[key].requires_grad:
- sname = self.m_name2s_name[key]
- shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
- shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
- else:
- assert key not in self.m_name2s_name
-
- def copy_to(self, model):
- m_param = dict(model.named_parameters())
- shadow_params = dict(self.named_buffers())
- for key in m_param:
- if m_param[key].requires_grad:
- m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
- else:
- assert key not in self.m_name2s_name
-
- def store(self, parameters):
- """
- Save the current parameters for restoring later.
- Args:
- parameters: Iterable of `torch.nn.Parameter`; the parameters to be
- temporarily stored.
- """
- self.collected_params = [param.clone() for param in parameters]
-
- def restore(self, parameters):
- """
- Restore the parameters stored with the `store` method.
- Useful to validate the model with EMA parameters without affecting the
- original optimization process. Store the parameters before the
- `copy_to` method. After validation (or model saving), use this to
- restore the former parameters.
- Args:
- parameters: Iterable of `torch.nn.Parameter`; the parameters to be
- updated with the stored parameters.
- """
- for c_param, param in zip(self.collected_params, parameters):
- param.data.copy_(c_param.data)
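A minimal usage sketch for `LitEma` with a hypothetical toy model and training loop; the import path is assumed from the repository layout.

```python
# Toy example: maintain an EMA copy of a model's weights and evaluate with them.
import torch
from torch import nn
from ldm.modules.ema import LitEma  # import path assumed from the repo layout

model = nn.Linear(10, 2)
ema = LitEma(model, decay=0.9999)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for _ in range(100):
    loss = model(torch.randn(8, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema(model)  # update the shadow (EMA) buffers after each optimizer step

# Swap in the EMA weights for evaluation, then restore the raw training weights.
ema.store(model.parameters())
ema.copy_to(model)
# ... run validation with the EMA weights here ...
ema.restore(model.parameters())
```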
diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts
deleted file mode 100644
index e935d719c8c29eb5e4efc30812f61b5f44716923..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#iterating_over_async_generators
-export async function* streamToAsyncIterable(
- stream: ReadableStream
-): AsyncIterableIterator {
- const reader = stream.getReader();
- try {
- while (true) {
- const { done, value } = await reader.read();
- if (done) return;
- yield value;
- }
- } finally {
- reader.releaseLock();
- }
-}
diff --git a/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh b/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh
deleted file mode 100644
index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000
--- a/spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
-# This script will run on every instance restart, not only on first start
-# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
-
-Content-Type: multipart/mixed; boundary="//"
-MIME-Version: 1.0
-
---//
-Content-Type: text/cloud-config; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="cloud-config.txt"
-
-#cloud-config
-cloud_final_modules:
-- [scripts-user, always]
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="userdata.txt"
-
-#!/bin/bash
-# --- paste contents of userdata.sh here ---
---//
diff --git a/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile b/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile
deleted file mode 100644
index 0155618f475104e9858b81470339558156c94e13..0000000000000000000000000000000000000000
--- a/spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM gcr.io/google-appengine/python
-
-# Create a virtualenv for dependencies. This isolates these packages from
-# system-level packages.
-# Use -p python3 or -p python3.7 to select python version. Default is version 2.
-RUN virtualenv /env -p python3
-
-# Setting these environment variables are the same as running
-# source /env/bin/activate.
-ENV VIRTUAL_ENV /env
-ENV PATH /env/bin:$PATH
-
-RUN apt-get update && apt-get install -y python-opencv
-
-# Copy the application's requirements.txt and run pip to install all
-# dependencies into the virtualenv.
-ADD requirements.txt /app/requirements.txt
-RUN pip install -r /app/requirements.txt
-
-# Add the application source code.
-ADD . /app
-
-# Run a WSGI server to serve the application. gunicorn must be declared as
-# a dependency in requirements.txt.
-CMD gunicorn -b :$PORT main:app
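The `CMD` above expects a `main.py` module exposing a WSGI `app` object. The repository's actual `main.py` is not part of this diff, so the sketch below is only a hypothetical placeholder; it also assumes Flask is declared in `requirements.txt` alongside gunicorn.

```python
# main.py — hypothetical minimal WSGI target matching `gunicorn -b :$PORT main:app`.
from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/")
def health():
    # Placeholder route; the real App Engine service would expose the model endpoints here.
    return jsonify(status="ok")
```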
diff --git a/spaces/AgentVerse/agentVerse/ui/README.md b/spaces/AgentVerse/agentVerse/ui/README.md
deleted file mode 100644
index acfff8c574d8da6d78e65b684d6ed0a043d76a07..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# Work in progress
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts
deleted file mode 100644
index d85c7816afe5fa3eedae1ac6208a08e211c0851c..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-import Base from '../base/Base';
-export default class Clock extends Base { }
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts
deleted file mode 100644
index e95eba9ba06402ea6929098aa01be9747043d91a..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-// import * as Phaser from 'phaser';
-import Container from "./Container";
-
-export default function (
- x?: number, y?: number,
- width?: number, height?: number,
- children?: Phaser.GameObjects.GameObject[]
-): Container;
\ No newline at end of file
diff --git a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py b/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py
deleted file mode 100644
index ef5d9b839788d662a234fa8ba01cf611508088fc..0000000000000000000000000000000000000000
--- a/spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py
+++ /dev/null
@@ -1,57 +0,0 @@
-
-import os, sys
-from libs import *
-from .layers import *
-from .modules import *
-from .bblocks import *
-
-class LightSEResNet18(nn.Module):
- def __init__(self,
- base_channels = 64,
- ):
- super(LightSEResNet18, self).__init__()
- self.bblock = LightSEResBlock
- self.stem = nn.Sequential(
- nn.Conv1d(
- 1, base_channels,
- kernel_size = 15, padding = 7, stride = 2,
- ),
- nn.BatchNorm1d(base_channels),
- nn.ReLU(),
- nn.MaxPool1d(
- kernel_size = 3, padding = 1, stride = 2,
- ),
- )
- self.stage_0 = nn.Sequential(
- self.bblock(base_channels),
- self.bblock(base_channels),
- )
-
- self.stage_1 = nn.Sequential(
- self.bblock(base_channels*1, downsample = True),
- self.bblock(base_channels*2),
- )
- self.stage_2 = nn.Sequential(
- self.bblock(base_channels*2, downsample = True),
- self.bblock(base_channels*4),
- )
- self.stage_3 = nn.Sequential(
- self.bblock(base_channels*4, downsample = True),
- self.bblock(base_channels*8),
- )
-
- self.pool = nn.AdaptiveAvgPool1d(1)
-
- def forward(self,
- input,
- ):
- output = self.stem(input)
- output = self.stage_0(output)
-
- output = self.stage_1(output)
- output = self.stage_2(output)
- output = self.stage_3(output)
-
- output = self.pool(output)
-
- return output
\ No newline at end of file
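A quick shape-check sketch for `LightSEResNet18`. It assumes the `LightSEResBlock` imported from `.bblocks` (not shown in this diff) follows the usual pattern of doubling channels when `downsample=True`, so the pooled output should carry `base_channels * 8` channels; the import path is likewise assumed.

```python
# Hypothetical smoke test: feed a batch of single-lead 1D signals through the backbone.
import torch
from nets.backbones import LightSEResNet18  # import path assumed from the repo layout

model = LightSEResNet18(base_channels=64)
signal = torch.randn(2, 1, 4096)   # (batch, channels, samples), e.g. short ECG segments
features = model(signal)
print(features.shape)              # expected: torch.Size([2, 512, 1]) after the adaptive pool
```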
diff --git a/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py b/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py
deleted file mode 100644
index f4ff135e549f0d6c72f733092f3df817cb178e01..0000000000000000000000000000000000000000
--- a/spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py
+++ /dev/null
@@ -1,889 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-from pathlib import Path
-from typing import Optional
-import subprocess
-import sys
-import gc
-import random
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch.utils.data import Dataset
-
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.optimization import get_scheduler
-from huggingface_hub import HfFolder, Repository, whoami
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-
-logger = get_logger(__name__)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- #required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- #required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- #required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default="",
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
-
- parser.add_argument(
- "--save_n_steps",
- type=int,
- default=1,
- help=("Save the model every n global_steps"),
- )
-
-
- parser.add_argument(
- "--save_starting_step",
- type=int,
- default=1,
- help=("The step from which it starts saving intermediary checkpoints"),
- )
-
- parser.add_argument(
- "--stop_text_encoder_training",
- type=int,
- default=1000000,
- help=("The step at which the text_encoder is no longer trained"),
- )
-
-
- parser.add_argument(
- "--image_captions_filename",
- action="store_true",
- help="Get captions from filename",
- )
-
-
- parser.add_argument(
- "--dump_only_text_encoder",
- action="store_true",
- default=False,
- help="Dump only text encoder",
- )
-
- parser.add_argument(
- "--train_only_unet",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--cache_latents",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--Session_dir",
- type=str,
- default="",
- help="Current session directory",
- )
-
-
-
-
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- #if args.instance_data_dir is None:
- # raise ValueError("You must specify a train data directory.")
-
- #if args.with_prior_preservation:
- # if args.class_data_dir is None:
- # raise ValueError("You must specify a data directory for class images.")
- # if args.class_prompt is None:
- # raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- args,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
- self.image_captions_filename = None
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
- raise ValueError("Instance images root doesn't exists.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if args.image_captions_filename:
- self.image_captions_filename = True
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- random.shuffle(self.class_images_path)
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- path = self.instance_images_path[index % self.num_instance_images]
- instance_image = Image.open(path)
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
-
- instance_prompt = self.instance_prompt
-
- if self.image_captions_filename:
- filename = Path(path).stem
- pt=''.join([i for i in filename if not i.isdigit()])
- pt=pt.replace("_"," ")
- pt=pt.replace("(","")
- pt=pt.replace(")","")
- pt=pt.replace("-","")
- instance_prompt = pt
- sys.stdout.write("\033[0;32m" + instance_prompt + "\033[0m")
- sys.stdout.flush()
-
-
- example["instance_images"] = self.image_transforms(instance_image)
- example["instance_prompt_ids"] = self.tokenizer(
- instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example["class_images"] = self.image_transforms(class_image)
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-class LatentsDataset(Dataset):
- def __init__(self, latents_cache, text_encoder_cache):
- self.latents_cache = latents_cache
- self.text_encoder_cache = text_encoder_cache
-
- def __len__(self):
- return len(self.latents_cache)
-
- def __getitem__(self, index):
- return self.latents_cache[index], self.text_encoder_cache[index]
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
- if token is None:
- token = HfFolder.get_token()
- if organization is None:
- username = whoami(token)["name"]
- return f"{username}/{model_id}"
- else:
- return f"{organization}/{model_id}"
-
-def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
- """
- Starts from the base starting dict and then adds/overwrites entries with the key-value pairs from the updater dict.
- On key collisions the updater dict's values win, matching the behaviour of d = {**starting_dict, **updater_dict}.
-
- :param starting_dict:
- :param updater_dict:
- :return:
- """
- new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
- new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict
- return new_dict
-
-def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
- """
-
- ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
- :param args1:
- :param args2:
- :return:
- """
- # - the merged args
- # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
- merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
- args = argparse.Namespace(**merged_key_values_for_namespace)
- return args
-
-def run_training(args_imported):
- args_default = parse_args()
- args = merge_args(args_default, args_imported)
- print(args)
- logging_dir = Path(args.output_dir, args.logging_dir)
- i=args.save_starting_step
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- logging_dir=logging_dir,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
-
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- with torch.autocast("cuda"):
- images = pipeline(example["prompt"]).images
-
- for i, image in enumerate(images):
- image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.push_to_hub:
- if args.hub_model_id is None:
- repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
- else:
- repo_name = args.hub_model_id
- repo = Repository(args.output_dir, clone_from=repo_name)
-
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
- if "step_*" not in gitignore:
- gitignore.write("step_*\n")
- if "epoch_*" not in gitignore:
- gitignore.write("epoch_*\n")
- elif args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- if args.train_only_unet:
- if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
- elif os.path.exists(str(args.output_dir+"/text_encoder")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
- if is_xformers_available():
- try:
- print("Enabling memory efficient attention with xformers...")
- unet.enable_xformers_memory_efficient_attention()
- except Exception as e:
- logger.warning(
- f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
- )
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- args=args,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
- batch = {
- "input_ids": input_ids,
- "pixel_values": pixel_values,
- }
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move text_encoder and vae to gpu.
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-
- if args.cache_latents:
- latents_cache = []
- text_encoder_cache = []
- for batch in tqdm(train_dataloader, desc="Caching latents"):
- with torch.no_grad():
- batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
- batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
- latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
- if args.train_text_encoder:
- text_encoder_cache.append(batch["input_ids"])
- else:
- text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
- train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
- train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
-
- del vae
- #if not args.train_text_encoder:
- # del text_encoder
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initializes automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- def bar(prg):
- br='|'+'█' * prg + ' ' * (25-prg)+'|'
- return br
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
- global_step = 0
-
- for epoch in range(args.num_train_epochs):
- unet.train()
- if args.train_text_encoder:
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(unet):
- # Convert images to latent space
- with torch.no_grad():
- if args.cache_latents:
- latents_dist = batch[0][0]
- else:
- latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
- latents = latents_dist.sample() * 0.18215
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- if(args.cache_latents):
- if args.train_text_encoder:
- encoder_hidden_states = text_encoder(batch[0][1])[0]
- else:
- encoder_hidden_states = batch[0][1]
- else:
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- fll=round((global_step*100)/args.max_train_steps)
- fll=round(fll/4)
- pr=bar(fll)
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- progress_bar.set_description_str("Progress:"+pr)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
- if accelerator.is_main_process:
- print(" [0;32m" +" Freezing the text_encoder ..."+" [0m")
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if os.path.exists(frz_dir):
- subprocess.call('rm -r '+ frz_dir, shell=True)
- os.mkdir(frz_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(frz_dir)
-
- if args.save_n_steps >= 200:
- if global_step < args.max_train_steps and global_step+1==i:
- ckpt_name = "_step_" + str(global_step+1)
- save_dir = Path(args.output_dir+ckpt_name)
- save_dir=str(save_dir)
- save_dir=save_dir.replace(" ", "_")
- if not os.path.exists(save_dir):
- os.mkdir(save_dir)
- inst=save_dir[16:]
- inst=inst.replace(" ", "_")
-             print("\033[1;32mSAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
- # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(save_dir)
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
- subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
- chkpth=args.Session_dir+"/"+inst+".ckpt"
- subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
- subprocess.call('rm -r '+ save_dir, shell=True)
- i=i+args.save_n_steps
-
- accelerator.wait_for_everyone()
-
-    # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- if args.dump_only_text_encoder:
- txt_dir=args.output_dir + "/text_encoder_trained"
- if not os.path.exists(txt_dir):
- os.mkdir(txt_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(txt_dir)
-
- elif args.train_only_unet:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(args.output_dir)
- txt_dir=args.output_dir + "/text_encoder_trained"
- subprocess.call('rm -r '+txt_dir, shell=True)
-
- else:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- frz_dir=args.output_dir + "/text_encoder_frozen"
- pipeline.save_pretrained(args.output_dir)
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
- subprocess.call('rm -r '+ frz_dir, shell=True)
-
- if args.push_to_hub:
- repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
- accelerator.end_training()
- del pipeline
- torch.cuda.empty_cache()
- gc.collect()
-if __name__ == "__main__":
- pass
- #main()
-
diff --git a/spaces/Altinas/vits-uma-genshin-honkais/utils.py b/spaces/Altinas/vits-uma-genshin-honkais/utils.py
deleted file mode 100644
index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000
--- a/spaces/Altinas/vits-uma-genshin-honkais/utils.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict= {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
-    except KeyError:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
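-
-# Usage sketch (hypothetical paths/objects): resuming a run, assuming `net_g` and `optim_g`
-# were already constructed and './logs/model/G_latest.pth' exists:
-#   net_g, optim_g, lr, it = load_checkpoint('./logs/model/G_latest.pth', net_g, optim_g)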
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
-  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
- audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
- return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
-    logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
-      logger.warning("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
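-
-
-# Usage sketch (hypothetical values): nested dicts become nested HParams objects,
-# usable both as attributes and as dict-style keys:
-#   hps = HParams(train={'batch_size': 16}, data={'sampling_rate': 22050})
-#   hps.train.batch_size          # -> 16
-#   hps['data']['sampling_rate']  # -> 22050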
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
deleted file mode 100644
index 49ab2304c146259cdb186457a92fd35cd0ebdfa5..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
+++ /dev/null
@@ -1,772 +0,0 @@
-# Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-import warnings
-from dataclasses import dataclass
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
-from ...image_processor import VaeImageProcessor
-from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel
-from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import (
- PIL_INTERPOLATION,
- BaseOutput,
- is_accelerate_available,
- is_accelerate_version,
- logging,
- randn_tensor,
- replace_example_docstring,
-)
-from ..pipeline_utils import DiffusionPipeline
-from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-
-
-@dataclass
-class StableDiffusionAdapterPipelineOutput(BaseOutput):
- """
- Args:
- images (`List[PIL.Image.Image]` or `np.ndarray`)
- List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
-            num_channels)`. PIL images or numpy array represent the denoised images of the diffusion pipeline.
- nsfw_content_detected (`List[bool]`)
- List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, or `None` if safety checking could not be performed.
- """
-
- images: Union[List[PIL.Image.Image], np.ndarray]
- nsfw_content_detected: Optional[List[bool]]
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> from PIL import Image
- >>> from diffusers.utils import load_image
- >>> import torch
- >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter
-
- >>> image = load_image(
- ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png"
- ... )
-
- >>> color_palette = image.resize((8, 8))
- >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST)
-
- >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16)
- >>> pipe = StableDiffusionAdapterPipeline.from_pretrained(
- ... "CompVis/stable-diffusion-v1-4",
- ... adapter=adapter,
- ... torch_dtype=torch.float16,
- ... )
-
- >>> pipe.to("cuda")
-
- >>> out_image = pipe(
- ... "At night, glowing cubes in front of the beach",
- ... image=color_palette,
- ... ).images[0]
- ```
-"""
-
-
-def _preprocess_adapter_image(image, height, width):
- if isinstance(image, torch.Tensor):
- return image
- elif isinstance(image, PIL.Image.Image):
- image = [image]
-
- if isinstance(image[0], PIL.Image.Image):
- image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
- image = [
- i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
- ] # expand [h, w] or [h, w, c] to [b, h, w, c]
- image = np.concatenate(image, axis=0)
- image = np.array(image).astype(np.float32) / 255.0
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- elif isinstance(image[0], torch.Tensor):
- if image[0].ndim == 3:
- image = torch.stack(image, dim=0)
- elif image[0].ndim == 4:
- image = torch.cat(image, dim=0)
- else:
- raise ValueError(
-                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimensions, but received: {image[0].ndim}"
- )
- return image
-
-
-class StableDiffusionAdapterPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter
- https://arxiv.org/abs/2302.08453
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
-            Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
-            list, the outputs from each Adapter are added together to create one combined additional conditioning.
- adapter_weights (`List[float]`, *optional*, defaults to None):
-            List of floats representing the weights by which each adapter's output is multiplied before the outputs are
-            added together.
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
- scheduler: KarrasDiffusionSchedulers,
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPFeatureExtractor,
- adapter_weights: Optional[List[float]] = None,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
-                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- if isinstance(adapter, (list, tuple)):
- adapter = MultiAdapter(adapter, adapter_weights=adapter_weights)
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- adapter=adapter,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
- def enable_vae_slicing(self):
- r"""
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
- """
- self.vae.enable_slicing()
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
- def disable_vae_slicing(self):
- r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
- computing decoding in one step.
- """
- self.vae.disable_slicing()
-
- def enable_model_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-        method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
- """
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
- from accelerate import cpu_offload_with_hook
- else:
-            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- if self.device.type != "cpu":
- self.to("cpu", silence_dtype_warnings=True)
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
- hook = None
- for cpu_offloaded_model in [self.text_encoder, self.adapter, self.unet, self.vae]:
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
- if self.safety_checker is not None:
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
- # We'll offload the last model manually.
- self.final_offload_hook = hook
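-
-    # Usage sketch (illustrative): call `pipe.enable_model_cpu_offload()` once after constructing
-    # the pipeline; each sub-model is then moved to the GPU only for its own forward pass and
-    # offloaded again afterwards.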
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt=None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- lora_scale: Optional[float] = None,
- ):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- lora_scale (`float`, *optional*):
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- """
- # set lora scale so that monkey patched LoRA
- # function of text encoder can correctly access it
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
- self._lora_scale = lora_scale
-
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- if prompt_embeds is None:
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- prompt_embeds = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- prompt_embeds = prompt_embeds[0]
-
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance and negative_prompt_embeds is None:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif prompt is not None and type(prompt) is not type(negative_prompt):
- raise TypeError(
-                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
- max_length = prompt_embeds.shape[1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- negative_prompt_embeds = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- negative_prompt_embeds = negative_prompt_embeds[0]
-
- if do_classifier_free_guidance:
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
-
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
- return prompt_embeds
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is None:
- has_nsfw_concept = None
- else:
- if torch.is_tensor(image):
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
- else:
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- return image, has_nsfw_concept
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
- def decode_latents(self, latents):
- warnings.warn(
- "The decode_latents method is deprecated and will be removed in a future version. Please"
- " use VaeImageProcessor instead",
- FutureWarning,
- )
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents, return_dict=False)[0]
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
- def check_inputs(
- self,
- prompt,
- height,
- width,
- callback_steps,
- negative_prompt=None,
- prompt_embeds=None,
- negative_prompt_embeds=None,
- ):
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- if prompt is not None and prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
- " only forward one of the two."
- )
- elif prompt is None and prompt_embeds is None:
- raise ValueError(
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
- )
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if negative_prompt is not None and negative_prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
- )
-
- if prompt_embeds is not None and negative_prompt_embeds is not None:
- if prompt_embeds.shape != negative_prompt_embeds.shape:
- raise ValueError(
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
- f" {negative_prompt_embeds.shape}."
- )
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- def _default_height_width(self, height, width, image):
- # NOTE: It is possible that a list of images have different
- # dimensions for each image, so just checking the first image
- # is not _exactly_ correct, but it is simple.
- while isinstance(image, list):
- image = image[0]
-
- if height is None:
- if isinstance(image, PIL.Image.Image):
- height = image.height
- elif isinstance(image, torch.Tensor):
- height = image.shape[-2]
-
- # round down to nearest multiple of `self.adapter.total_downscale_factor`
- height = (height // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor
-
- if width is None:
- if isinstance(image, PIL.Image.Image):
- width = image.width
- elif isinstance(image, torch.Tensor):
- width = image.shape[-1]
-
- # round down to nearest multiple of `self.adapter.total_downscale_factor`
- width = (width // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor
-
- return height, width
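-
-    # Illustrative example (hypothetical numbers): if the adapter's total_downscale_factor
-    # were 64 and the conditioning image is 520x776, height and width round down to 512x768.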
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- prompt: Union[str, List[str]] = None,
- image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- adapter_conditioning_scale: Union[float, List[float]] = 1.0,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
- The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
-                type is specified as `torch.FloatTensor`, it is passed to the Adapter as is. `PIL.Image.Image` can also be
- accepted as an image. The control image is automatically resized to fit the output image.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
-                `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] instead
- of a plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
- cross_attention_kwargs (`dict`, *optional*):
- A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
- `self.processor` in
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
- adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
- The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
- residual in the original unet. If multiple adapters are specified in init, you can set the
- corresponding scale as a list.
-
- Examples:
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
- [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
-            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
- element is a list of `bool`s denoting whether the corresponding generated image likely represents
- "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
- """
- # 0. Default height and width to unet
- height, width = self._default_height_width(height, width, image)
- device = self._execution_device
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
- )
-
- is_multi_adapter = isinstance(self.adapter, MultiAdapter)
- if is_multi_adapter:
- adapter_input = [_preprocess_adapter_image(img, height, width).to(device) for img in image]
- n, c, h, w = adapter_input[0].shape
- adapter_input = torch.stack([x.reshape([n * c, h, w]) for x in adapter_input])
- else:
- adapter_input = _preprocess_adapter_image(image, height, width).to(device)
- adapter_input = adapter_input.to(self.adapter.dtype)
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 4. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 5. Prepare latent variables
- num_channels_latents = self.unet.config.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- latents,
- )
-
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 7. Denoising loop
- adapter_state = self.adapter(adapter_input)
- for k, v in enumerate(adapter_state):
- adapter_state[k] = v * adapter_conditioning_scale
- if num_images_per_prompt > 1:
- for k, v in enumerate(adapter_state):
- adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
- if do_classifier_free_guidance:
- for k, v in enumerate(adapter_state):
- adapter_state[k] = torch.cat([v] * 2, dim=0)
-
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- down_block_additional_residuals=[state.clone() for state in adapter_state],
- ).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- if output_type == "latent":
- image = latents
- has_nsfw_concept = None
- elif output_type == "pil":
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # 10. Convert to PIL
- image = self.numpy_to_pil(image)
- else:
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md b/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md
deleted file mode 100644
index 17eaa7d1dea393cbf9b8e3fd44c607b447812e6f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Mixed Precision Training
-
-## Introduction
-
-[OTHERS]
-
-```latex
-@article{micikevicius2017mixed,
- title={Mixed precision training},
- author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others},
- journal={arXiv preprint arXiv:1710.03740},
- year={2017}
-}
-```
-
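-The fp16 variants in this folder follow the usual MMDetection convention: inherit the corresponding full-precision config and add an `fp16` dict with a loss scale. A minimal sketch (the base config path below is illustrative; see the linked configs for the official files):
-
-```python
-_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-# enable mixed precision training with a static loss scale
-fp16 = dict(loss_scale=512.)
-```
-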
-## Results and Models
-
-| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
-|:------------:|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
-| Faster R-CNN | R-50 | pytorch | 1x | 3.4 | 28.8 | 37.5 | - |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) |
-| Mask R-CNN | R-50 | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) |
-| Retinanet | R-50 | pytorch | 1x | 2.8 | 31.6 | 36.4 | |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) |
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py
deleted file mode 100644
index 04971226eb0fd6461b715358ac955dfb78102992..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = './ocrnet_hr18_512x512_80k_ade20k.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w48',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(48, 96)),
- stage3=dict(num_channels=(48, 96, 192)),
- stage4=dict(num_channels=(48, 96, 192, 384)))),
- decode_head=[
- dict(
- type='FCNHead',
- in_channels=[48, 96, 192, 384],
- channels=sum([48, 96, 192, 384]),
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- kernel_size=1,
- num_convs=1,
- norm_cfg=norm_cfg,
- concat_input=False,
- dropout_ratio=-1,
- num_classes=150,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- dict(
- type='OCRHead',
- in_channels=[48, 96, 192, 384],
- channels=512,
- ocr_channels=256,
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- norm_cfg=norm_cfg,
- dropout_ratio=-1,
- num_classes=150,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
- ])
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index b94974f865491731a1251e3e9736e01cbe81b06f..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-name: Feature request
-about: Suggest an improvement or new feature for the web UI
-title: ''
-labels: 'enhancement'
-assignees: ''
-
----
-
-**Description**
-
-A clear and concise description of what you want to be implemented.
-
-**Additional Context**
-
-If applicable, please provide any extra information, external links, or screenshots that could be useful.
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py
deleted file mode 100644
index ff12f3afdc28be4ead12ffab90bd9fbd783514a2..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-
-import gradio as gr
-
-# get the current directory of the script
-current_dir = os.path.dirname(os.path.abspath(__file__))
-
-# check if the bias_options.txt file exists, if not, create it
-bias_file = os.path.join(current_dir, "bias_options.txt")
-if not os.path.isfile(bias_file):
- with open(bias_file, "w") as f:
- f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")
-
-# read bias options from the text file
-with open(bias_file, "r") as f:
- bias_options = [line.strip() for line in f.readlines()]
-
-params = {
- "activate": True,
- "bias string": " *I am so happy*",
- "use custom string": False,
-}
-
-
-def input_modifier(string):
- """
- This function is applied to your text inputs before
- they are fed into the model.
- """
- return string
-
-
-def output_modifier(string):
- """
- This function is applied to the model outputs.
- """
- return string
-
-
-def bot_prefix_modifier(string):
- """
- This function is only applied in chat mode. It modifies
- the prefix text for the Bot and can be used to bias its
- behavior.
- """
- if params['activate']:
- if params['use custom string']:
- return f'{string} {params["custom string"].strip()} '
- else:
- return f'{string} {params["bias string"].strip()} '
- else:
- return string
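-
-# Illustrative example: with the default params above, a chat prefix such as "Bot:" becomes
-# "Bot: *I am so happy* ", which biases the reply toward that mood.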
-
-
-def ui():
- # Gradio elements
- activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
- dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
- use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
- custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above')
-
- # Event functions to update the parameters in the backend
- def update_bias_string(x):
- if x:
- params.update({"bias string": x})
- else:
- params.update({"bias string": dropdown_string.get()})
- return x
-
- def update_custom_string(x):
- params.update({"custom string": x})
-
- dropdown_string.change(update_bias_string, dropdown_string, None)
- custom_string.change(update_custom_string, custom_string, None)
- activate.change(lambda x: params.update({"activate": x}), activate, None)
- use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)
-
- # Group elements together depending on the selected option
- def bias_string_group():
- if use_custom_string.value:
- return gr.Group([use_custom_string, custom_string])
- else:
- return dropdown_string
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py
deleted file mode 100644
index 81e47a914a1aa2e5458e18669d65ffb742f46fc6..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import os.path as osp
-import tempfile
-
-import annotator.uniformer.mmcv as mmcv
-import numpy as np
-from annotator.uniformer.mmcv.utils import print_log
-from PIL import Image
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class CityscapesDataset(CustomDataset):
- """Cityscapes dataset.
-
- The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
- fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
- """
-
- CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
- 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
- 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
- 'bicycle')
-
- PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
- [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
- [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
- [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
- [0, 80, 100], [0, 0, 230], [119, 11, 32]]
-
- def __init__(self, **kwargs):
- super(CityscapesDataset, self).__init__(
- img_suffix='_leftImg8bit.png',
- seg_map_suffix='_gtFine_labelTrainIds.png',
- **kwargs)
-
- @staticmethod
- def _convert_to_label_id(result):
- """Convert trainId to id for cityscapes."""
- if isinstance(result, str):
- result = np.load(result)
- import cityscapesscripts.helpers.labels as CSLabels
- result_copy = result.copy()
- for trainId, label in CSLabels.trainId2label.items():
- result_copy[result == trainId] = label.id
-
- return result_copy
-
- def results2img(self, results, imgfile_prefix, to_label_id):
- """Write the segmentation results to images.
-
- Args:
- results (list[list | tuple | ndarray]): Testing results of the
- dataset.
- imgfile_prefix (str): The filename prefix of the png files.
- If the prefix is "somepath/xxx",
- the png files will be named "somepath/xxx.png".
-            to_label_id (bool): whether to convert output to label_id for
-                submission
-
- Returns:
-            list[str]: paths of the written png files, each containing the
-            corresponding semantic segmentation result.
- """
- mmcv.mkdir_or_exist(imgfile_prefix)
- result_files = []
- prog_bar = mmcv.ProgressBar(len(self))
- for idx in range(len(self)):
- result = results[idx]
- if to_label_id:
- result = self._convert_to_label_id(result)
- filename = self.img_infos[idx]['filename']
- basename = osp.splitext(osp.basename(filename))[0]
-
- png_filename = osp.join(imgfile_prefix, f'{basename}.png')
-
- output = Image.fromarray(result.astype(np.uint8)).convert('P')
- import cityscapesscripts.helpers.labels as CSLabels
- palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
- for label_id, label in CSLabels.id2label.items():
- palette[label_id] = label.color
-
- output.putpalette(palette)
- output.save(png_filename)
- result_files.append(png_filename)
- prog_bar.update()
-
- return result_files
-
- def format_results(self, results, imgfile_prefix=None, to_label_id=True):
- """Format the results into dir (standard format for Cityscapes
- evaluation).
-
- Args:
- results (list): Testing results of the dataset.
-            imgfile_prefix (str | None): The prefix of image files. It
- includes the file path and the prefix of filename, e.g.,
- "a/b/prefix". If not specified, a temp file will be created.
- Default: None.
-            to_label_id (bool): whether to convert output to label_id for
-                submission. Default: True
-
- Returns:
- tuple: (result_files, tmp_dir), result_files is a list containing
- the image paths, tmp_dir is the temporal directory created
- for saving json/png files when img_prefix is not specified.
- """
-
- assert isinstance(results, list), 'results must be a list'
- assert len(results) == len(self), (
- 'The length of results is not equal to the dataset len: '
- f'{len(results)} != {len(self)}')
-
- if imgfile_prefix is None:
- tmp_dir = tempfile.TemporaryDirectory()
- imgfile_prefix = tmp_dir.name
- else:
- tmp_dir = None
- result_files = self.results2img(results, imgfile_prefix, to_label_id)
-
- return result_files, tmp_dir
-
- def evaluate(self,
- results,
- metric='mIoU',
- logger=None,
- imgfile_prefix=None,
- efficient_test=False):
- """Evaluation in Cityscapes/default protocol.
-
- Args:
- results (list): Testing results of the dataset.
- metric (str | list[str]): Metrics to be evaluated.
- logger (logging.Logger | None | str): Logger used for printing
- related information during evaluation. Default: None.
- imgfile_prefix (str | None): The prefix of output image file,
- for cityscapes evaluation only. It includes the file path and
- the prefix of filename, e.g., "a/b/prefix".
- If results are evaluated with cityscapes protocol, it would be
- the prefix of output png files. The output files would be
- png images under folder "a/b/prefix/xxx.png", where "xxx" is
- the image name of cityscapes. If not specified, a temp file
- will be created for evaluation.
- Default: None.
-
- Returns:
- dict[str, float]: Cityscapes/default metrics.
- """
-
- eval_results = dict()
- metrics = metric.copy() if isinstance(metric, list) else [metric]
- if 'cityscapes' in metrics:
- eval_results.update(
- self._evaluate_cityscapes(results, logger, imgfile_prefix))
- metrics.remove('cityscapes')
- if len(metrics) > 0:
- eval_results.update(
- super(CityscapesDataset,
- self).evaluate(results, metrics, logger, efficient_test))
-
- return eval_results
-
- def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
- """Evaluation in Cityscapes protocol.
-
- Args:
- results (list): Testing results of the dataset.
- logger (logging.Logger | str | None): Logger used for printing
- related information during evaluation. Default: None.
- imgfile_prefix (str | None): The prefix of output image file
-
- Returns:
- dict[str: float]: Cityscapes evaluation results.
- """
- try:
- import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa
- except ImportError:
- raise ImportError('Please run "pip install cityscapesscripts" to '
- 'install cityscapesscripts first.')
- msg = 'Evaluating in Cityscapes style'
- if logger is None:
- msg = '\n' + msg
- print_log(msg, logger=logger)
-
- result_files, tmp_dir = self.format_results(results, imgfile_prefix)
-
- if tmp_dir is None:
- result_dir = imgfile_prefix
- else:
- result_dir = tmp_dir.name
-
- eval_results = dict()
- print_log(f'Evaluating results under {result_dir} ...', logger=logger)
-
- CSEval.args.evalInstLevelScore = True
- CSEval.args.predictionPath = osp.abspath(result_dir)
- CSEval.args.evalPixelAccuracy = True
- CSEval.args.JSONOutput = False
-
- seg_map_list = []
- pred_list = []
-
- # when evaluating with official cityscapesscripts,
- # **_gtFine_labelIds.png is used
- for seg_map in mmcv.scandir(
- self.ann_dir, 'gtFine_labelIds.png', recursive=True):
- seg_map_list.append(osp.join(self.ann_dir, seg_map))
- pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
-
- eval_results.update(
- CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
-
- if tmp_dir is not None:
- tmp_dir.cleanup()
-
- return eval_results
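
For reference, the core of the deleted CityscapesDataset above is converting trainId predictions back to official Cityscapes label ids and writing paletted PNGs for submission. A minimal, self-contained sketch of that step, assuming numpy, Pillow and cityscapesscripts are installed (the helper name and the dummy prediction are illustrative, not part of the original file):

import numpy as np
from PIL import Image
import cityscapesscripts.helpers.labels as CSLabels

def save_cityscapes_prediction(result, png_filename):
    # map trainId values (0..18) back to official Cityscapes label ids
    converted = result.copy()
    for train_id, label in CSLabels.trainId2label.items():
        converted[result == train_id] = label.id
    # store as a paletted PNG so viewers show the usual Cityscapes colors
    output = Image.fromarray(converted.astype(np.uint8)).convert('P')
    palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
    for label_id, label in CSLabels.id2label.items():
        palette[label_id] = label.color
    output.putpalette(palette)
    output.save(png_filename)

# toy usage: a random 19-class trainId map
save_cityscapes_prediction(np.random.randint(0, 19, size=(64, 128)), 'example_pred.png')
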
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py b/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py
deleted file mode 100644
index b84442cc93f7f9c30cb7311b8675d9124a6e8ec9..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py
+++ /dev/null
@@ -1,59 +0,0 @@
-path_sd15 = './models/v1-5-pruned.ckpt'
-path_sd15_with_control = './models/control_sd15_openpose.pth'
-path_input = './models/anything-v3-full.safetensors'
-path_output = './models/control_any3_openpose.pth'
-
-
-import os
-
-
-assert os.path.exists(path_sd15), 'Input path_sd15 does not exist!'
-assert os.path.exists(path_sd15_with_control), 'Input path_sd15_with_control does not exist!'
-assert os.path.exists(path_input), 'Input path_input does not exist!'
-assert os.path.exists(os.path.dirname(path_output)), 'Output folder does not exist!'
-
-
-import torch
-from share import *
-from cldm.model import load_state_dict
-
-
-sd15_state_dict = load_state_dict(path_sd15)
-sd15_with_control_state_dict = load_state_dict(path_sd15_with_control)
-input_state_dict = load_state_dict(path_input)
-
-
-def get_node_name(name, parent_name):
- if len(name) <= len(parent_name):
- return False, ''
- p = name[:len(parent_name)]
- if p != parent_name:
- return False, ''
- return True, name[len(parent_name):]
-
-
-keys = sd15_with_control_state_dict.keys()
-
-final_state_dict = {}
-for key in keys:
- is_first_stage, _ = get_node_name(key, 'first_stage_model')
- is_cond_stage, _ = get_node_name(key, 'cond_stage_model')
- if is_first_stage or is_cond_stage:
- final_state_dict[key] = input_state_dict[key]
- continue
- p = sd15_with_control_state_dict[key]
- is_control, node_name = get_node_name(key, 'control_')
- if is_control:
- sd15_key_name = 'model.diffusion_' + node_name
- else:
- sd15_key_name = key
- if sd15_key_name in input_state_dict:
- p_new = p + input_state_dict[sd15_key_name] - sd15_state_dict[sd15_key_name]
- # print(f'Offset clone from [{sd15_key_name}] to [{key}]')
- else:
- p_new = p
- # print(f'Direct clone to [{key}]')
- final_state_dict[key] = p_new
-
-torch.save(final_state_dict, path_output)
-print('Transferred model saved at ' + path_output)
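
The whole deleted script reduces to one offset rule applied key-by-key: the new control weight is the SD 1.5 control weight shifted by the difference between the target base model and vanilla SD 1.5, i.e. W_new = W_control + (W_target - W_sd15), while first_stage/cond_stage weights are taken straight from the target model. A tiny self-contained sketch of that rule on toy tensors (it does not load the real checkpoints; values are illustrative):

import torch

def transfer_weight(control_w, sd15_w, target_w):
    # shift the control weight by the offset separating the target base model from SD 1.5
    return control_w + (target_w - sd15_w)

control_w = torch.tensor([1.0, 2.0, 3.0])  # e.g. a key from control_sd15_openpose
sd15_w = torch.tensor([0.5, 0.5, 0.5])     # matching key in v1-5-pruned
target_w = torch.tensor([0.7, 0.4, 0.9])   # matching key in anything-v3
print(transfer_weight(control_w, sd15_w, target_w))  # tensor([1.2000, 1.9000, 3.4000])
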
diff --git a/spaces/Artrajz/vits-simple-api/voice.py b/spaces/Artrajz/vits-simple-api/voice.py
deleted file mode 100644
index 9cce5625519a1aedc0dae7e89be2513684daa4d4..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/voice.py
+++ /dev/null
@@ -1,325 +0,0 @@
-import os
-import librosa
-import re
-import numpy as np
-import torch
-import xml.etree.ElementTree as ET
-import config
-import soundfile as sf
-from io import BytesIO
-from graiax import silkcoder
-from logger import logger
-from contants import ModelType
-from scipy.signal import resample_poly
-
-
-# torch.set_num_threads(1)  # limit torch to a single thread
-
-
-class TTS:
- def __init__(self, voice_obj, voice_speakers, **kwargs):
- self._voice_obj = voice_obj
- self._voice_speakers = voice_speakers
- self._strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25}
- self._speakers_count = sum([len(self._voice_speakers[i]) for i in self._voice_speakers])
- self._vits_speakers_count = len(self._voice_speakers[ModelType.VITS.value])
- self._hubert_speakers_count = len(self._voice_speakers[ModelType.HUBERT_VITS.value])
- self._w2v2_speakers_count = len(self._voice_speakers[ModelType.W2V2_VITS.value])
- self._w2v2_emotion_count = kwargs.get("w2v2_emotion_count", 0)
- self._bert_vits2_speakers_count = len(self._voice_speakers[ModelType.BERT_VITS2.value])
- self.dem = None
-
- # Initialization information
- self.logger = logger
- self.logger.info(f"torch:{torch.__version__} cuda_available:{torch.cuda.is_available()}")
- self.logger.info(f'device:{kwargs.get("device")} device.type:{kwargs.get("device").type}')
-
- if getattr(config, "DIMENSIONAL_EMOTION_MODEL", None) is not None:
- try:
- import audonnx
- root = os.path.dirname(config.DIMENSIONAL_EMOTION_MODEL)
- model_file = config.DIMENSIONAL_EMOTION_MODEL
- self.dem = audonnx.load(root=root, model_file=model_file)
- except Exception as e:
- self.logger.warning(f"Load DIMENSIONAL_EMOTION_MODEL failed {e}")
-
- if self._vits_speakers_count != 0: self.logger.info(f"[{ModelType.VITS.value}] {self._vits_speakers_count} speakers")
- if self._hubert_speakers_count != 0: self.logger.info(f"[{ModelType.HUBERT_VITS.value}] {self._hubert_speakers_count} speakers")
- if self._w2v2_speakers_count != 0: self.logger.info(f"[{ModelType.W2V2_VITS.value}] {self._w2v2_speakers_count} speakers")
- if self._bert_vits2_speakers_count != 0: self.logger.info(
- f"[{ModelType.BERT_VITS2.value}] {self._bert_vits2_speakers_count} speakers")
- self.logger.info(f"{self._speakers_count} speakers in total.")
- if self._speakers_count == 0:
- self.logger.warning(f"No model was loaded.")
-
- @property
- def voice_speakers(self):
- return self._voice_speakers
-
- @property
- def speakers_count(self):
- return self._speakers_count
-
- @property
- def vits_speakers_count(self):
- return self._vits_speakers_count
-
- @property
- def hubert_speakers_count(self):
- return self._hubert_speakers_count
-
- @property
- def w2v2_speakers_count(self):
- return self._w2v2_speakers_count
-
- @property
- def w2v2_emotion_count(self):
- return self._w2v2_emotion_count
-
- @property
- def bert_vits2_speakers_count(self):
- return self._bert_vits2_speakers_count
-
- def encode(self, sampling_rate, audio, format):
- with BytesIO() as f:
- if format.upper() == 'OGG':
- sf.write(f, audio, sampling_rate, format="ogg")
- return BytesIO(f.getvalue())
- elif format.upper() == 'SILK':
- sf.write(f, audio, sampling_rate, format="wav")
- return BytesIO(silkcoder.encode(f))
- elif format.upper() == 'MP3':
- sf.write(f, audio, sampling_rate, format="mp3")
- return BytesIO(f.getvalue())
- elif format.upper() == 'WAV':
- sf.write(f, audio, sampling_rate, format="wav")
- return BytesIO(f.getvalue())
- elif format.upper() == 'FLAC':
- sf.write(f, audio, sampling_rate, format="flac")
- return BytesIO(f.getvalue())
- else:
- raise ValueError(f"Unsupported format:{format}")
-
- def convert_time_string(self, time_string):
- time_value = float(re.findall(r'\d+\.?\d*', time_string)[0])
- time_unit = re.findall(r'[a-zA-Z]+', time_string)[0].lower()
-
- if time_unit.upper() == 'MS':
- return time_value / 1000
- elif time_unit.upper() == 'S':
- return time_value
- elif time_unit.upper() == 'MIN':
- return time_value * 60
- elif time_unit.upper() == 'H':
- return time_value * 3600
- elif time_unit.upper() == 'D':
- return time_value * 24 * 3600  # surely nobody actually specifies days here?
- else:
- raise ValueError("Unsupported time unit: {}".format(time_unit))
-
- def generate_audio_chunks(self, audio):
- chunk_size = 4096
- while True:
- chunk = audio.read(chunk_size)
- if not chunk:
- break
- yield chunk
-
- def resample_audio(self, audio, orig_sr, target_sr):
- if orig_sr == target_sr:
- return audio
-
- gcd = np.gcd(orig_sr, target_sr)
- audio = resample_poly(audio, target_sr // gcd, orig_sr // gcd)
-
- return audio
-
- def parse_ssml(self, ssml):
- root = ET.fromstring(ssml)
- format = root.attrib.get("format", "wav")
- voice_tasks = []
- brk_count = 0
- strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25}
-
- for element in root.iter():
- if element.tag == "voice":
- id = int(element.attrib.get("id", root.attrib.get("id", config.ID)))
- lang = element.attrib.get("lang", root.attrib.get("lang", config.LANG))
- length = float(element.attrib.get("length", root.attrib.get("length", config.LENGTH)))
- noise = float(element.attrib.get("noise", root.attrib.get("noise", config.NOISE)))
- noisew = float(element.attrib.get("noisew", root.attrib.get("noisew", config.NOISEW)))
- max = int(element.attrib.get("max", root.attrib.get("max", "0")))
- # defaults to vits when not specified
- model_type = element.attrib.get("model_type", root.attrib.get("model_type", "vits"))
- # only w2v2-vits/emotion-vits use the emotion parameter
- emotion = int(element.attrib.get("emotion", root.attrib.get("emotion", 0)))
- # Bert-VITS2-specific parameter
- sdp_ratio = int(element.attrib.get("sdp_ratio", root.attrib.get("sdp_ratio", config.SDP_RATIO)))
-
- voice_element = ET.tostring(element, encoding='unicode')
-
- pattern_voice = r'<voice.*?>(.*?)</voice>'
- pattern_break = r'<break\s*?(.*?)\s*?/>'
-
- matches_voice = re.findall(pattern_voice, voice_element)[0]
- matches_break = re.split(pattern_break, matches_voice)
- for match in matches_break:
- strength = re.search(r'\s*strength\s*=\s*[\'\"](.*?)[\'\"]', match)
- time = re.search(r'\s*time\s*=\s*[\'\"](.*?)[\'\"]', match)
- # break tag with a strength attribute
- if strength:
- brk = strength_dict[strength.group(1)]
- voice_tasks.append({"break": brk})
- brk_count += 1
- # break tag with a time attribute
- elif time:
- brk = self.convert_time_string(time.group(1))
- voice_tasks.append({"break": brk})
- brk_count += 1
- # an empty match means a bare break tag; default pause of 0.75s
- elif match == "":
- voice_tasks.append({"break": 0.75})
- brk_count += 1
- # everything inside the voice tag other than break tags is text
- else:
- voice_tasks.append({"id": id,
- "text": match,
- "lang": lang,
- "length": length,
- "noise": noise,
- "noisew": noisew,
- "max": max,
- "model_type": model_type,
- "emotion": emotion,
- "sdp_ratio": sdp_ratio
- })
-
- # pause 0.75s at the end of each segment
- voice_tasks.append({"break": 0.75})
- elif element.tag == "break":
- # brk_count > 0 means this break was already handled inside a voice tag
- if brk_count > 0:
- brk_count -= 1
- continue
- brk = strength_dict.get(element.attrib.get("strength"),
- self.convert_time_string(element.attrib.get("time", "750ms")))
- voice_tasks.append({"break": brk})
-
- for i in voice_tasks:
- self.logger.debug(i)
-
- return voice_tasks, format
-
- def process_ssml_infer_task(self, tasks, format):
- audios = []
- sampling_rates = []
- last_sampling_rate = 22050
- for task in tasks:
- if task.get("break"):
- audios.append(np.zeros(int(task.get("break") * 22050), dtype=np.int16))
- sampling_rates.append(last_sampling_rate)
- else:
- model_type_str = task.get("model_type").upper()
- if model_type_str not in [ModelType.VITS.value, ModelType.W2V2_VITS.value, ModelType.BERT_VITS2.value]:
- raise ValueError(f"Unsupported model type: {task.get('model_type')}")
- model_type = ModelType(model_type_str)
- voice_obj = self._voice_obj[model_type][task.get("id")][1]
- real_id = self._voice_obj[model_type][task.get("id")][0]
- task["id"] = real_id
- sampling_rates.append(voice_obj.sampling_rate)
- last_sampling_rate = voice_obj.sampling_rate
- audio = voice_obj.get_audio(task)
- audios.append(audio)
- # use the highest sampling rate among all segments
- target_sr = max(sampling_rates)
- # resample every segment to match the highest sampling rate
- resampled_audios = [self.resample_audio(audio, sr, target_sr) for audio, sr in zip(audios, sampling_rates)]
- audio = np.concatenate(resampled_audios, axis=0)
- encoded_audio = self.encode(target_sr, audio, format)
- return encoded_audio
-
- def vits_infer(self, task):
- format = task.get("format", "wav")
- voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1]
- real_id = self._voice_obj[ModelType.VITS][task.get("id")][0]
- task["id"] = real_id # Change to real id
- sampling_rate = voice_obj.sampling_rate
- audio = voice_obj.get_audio(task, auto_break=True)
- encoded_audio = self.encode(sampling_rate, audio, format)
- return encoded_audio
-
- def stream_vits_infer(self, task, fname=None):
- format = task.get("format", "wav")
- voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1]
- task["id"] = self._voice_obj[ModelType.VITS][task.get("id")][0]
- sampling_rate = voice_obj.sampling_rate
- generator = voice_obj.get_stream_audio(task, auto_break=True)
- # audio = BytesIO()
- for chunk in generator:
- encoded_audio = self.encode(sampling_rate, chunk, format)
- for encoded_audio_chunk in self.generate_audio_chunks(encoded_audio):
- yield encoded_audio_chunk
- # if getattr(config, "SAVE_AUDIO", False):
- # audio.write(encoded_audio.getvalue())
- # if getattr(config, "SAVE_AUDIO", False):
- # path = f"{config.CACHE_PATH}/{fname}"
- # utils.save_audio(audio.getvalue(), path)
-
- def hubert_vits_infer(self, task):
- format = task.get("format", "wav")
- voice_obj = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][1]
- task["id"] = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][0]
- sampling_rate = voice_obj.sampling_rate
- audio = voice_obj.get_audio(task)
- encoded_audio = self.encode(sampling_rate, audio, format)
- return encoded_audio
-
- def w2v2_vits_infer(self, task):
- format = task.get("format", "wav")
- voice_obj = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][1]
- task["id"] = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][0]
- sampling_rate = voice_obj.sampling_rate
- audio = voice_obj.get_audio(task, auto_break=True)
- encoded_audio = self.encode(sampling_rate, audio, format)
- return encoded_audio
-
- def vits_voice_conversion(self, task):
- original_id = task.get("original_id")
- target_id = task.get("target_id")
- format = task.get("format")
-
- original_id_obj = int(self._voice_obj[ModelType.VITS][original_id][2])
- target_id_obj = int(self._voice_obj[ModelType.VITS][target_id][2])
-
- if original_id_obj != target_id_obj:
- raise ValueError("speakers are in different VITS models")
-
- task["original_id"] = int(self._voice_obj[ModelType.VITS][original_id][0])
- task["target_id"] = int(self._voice_obj[ModelType.VITS][target_id][0])
-
- voice_obj = self._voice_obj[ModelType.VITS][original_id][1]
- sampling_rate = voice_obj.sampling_rate
-
- audio = voice_obj.voice_conversion(task)
- encoded_audio = self.encode(sampling_rate, audio, format)
- return encoded_audio
-
- def get_dimensional_emotion_npy(self, audio):
- if self.dem is None:
- raise ValueError(f"Please configure DIMENSIONAL_EMOTION_MODEL path in config.py")
- audio16000, sampling_rate = librosa.load(audio, sr=16000, mono=True)
- emotion = self.dem(audio16000, sampling_rate)['hidden_states']
- emotion_npy = BytesIO()
- np.save(emotion_npy, emotion.squeeze(0))
- emotion_npy.seek(0)
-
- return emotion_npy
-
- def bert_vits2_infer(self, task):
- format = task.get("format", "wav")
- voice_obj = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][1]
- task["id"] = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][0]
- sampling_rate = voice_obj.sampling_rate
- audio = voice_obj.get_audio(task, auto_break=True)
- encoded_audio = self.encode(sampling_rate, audio, format)
- return encoded_audio
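
One detail worth noting in the deleted TTS class: process_ssml_infer_task may mix segments produced by models with different sampling rates, so every segment is resampled to the highest rate before concatenation. A small self-contained sketch of that idea using scipy's resample_poly, with toy sine waves standing in for model output (the rates and names here are illustrative):

import numpy as np
from scipy.signal import resample_poly

def resample_audio(audio, orig_sr, target_sr):
    if orig_sr == target_sr:
        return audio
    gcd = np.gcd(orig_sr, target_sr)
    # polyphase resampling by the reduced up/down ratio
    return resample_poly(audio, target_sr // gcd, orig_sr // gcd)

seg_a = np.sin(2 * np.pi * 440 * np.arange(22050) / 22050)  # 1 s at 22050 Hz
seg_b = np.sin(2 * np.pi * 440 * np.arange(44100) / 44100)  # 1 s at 44100 Hz
target_sr = max(22050, 44100)
mixed = np.concatenate([resample_audio(seg_a, 22050, target_sr),
                        resample_audio(seg_b, 44100, target_sr)])
print(mixed.shape)  # (88200,): two seconds at 44100 Hz
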
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py
deleted file mode 100644
index 1bf26a94226d65089cbc1e50a40c719692517470..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py
+++ /dev/null
@@ -1,3360 +0,0 @@
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof. The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is. Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files. It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-
-This module is deprecated. Users are directed to
-`importlib.resources <https://docs.python.org/3/library/importlib.resources.html>`_
-and
-`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
-instead.
-"""
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
-import errno
-import tempfile
-import textwrap
-import inspect
-import ntpath
-import posixpath
-import importlib
-from pkgutil import get_importer
-
-try:
- import _imp
-except ImportError:
- # Python 3.2 compatibility
- import imp as _imp
-
-try:
- FileExistsError
-except NameError:
- FileExistsError = OSError
-
-# capture these to bypass sandboxing
-from os import utime
-
-try:
- from os import mkdir, rename, unlink
-
- WRITE_SUPPORT = True
-except ImportError:
- # no write support, probably under GAE
- WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
- import importlib.machinery as importlib_machinery
-
- # access attribute to force import under delayed import mechanisms.
- importlib_machinery.__name__
-except ImportError:
- importlib_machinery = None
-
-from pip._internal.utils._jaraco_text import (
- yield_lines,
- drop_comment,
- join_continuation,
-)
-
-from pip._vendor import platformdirs
-from pip._vendor import packaging
-
-__import__('pip._vendor.packaging.version')
-__import__('pip._vendor.packaging.specifiers')
-__import__('pip._vendor.packaging.requirements')
-__import__('pip._vendor.packaging.markers')
-__import__('pip._vendor.packaging.utils')
-
-if sys.version_info < (3, 5):
- raise RuntimeError("Python 3.5 or later is required")
-
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
-add_activation_listener = None
-resources_stream = None
-cleanup_resources = None
-resource_dir = None
-resource_stream = None
-set_extraction_path = None
-resource_isdir = None
-resource_string = None
-iter_entry_points = None
-resource_listdir = None
-resource_filename = None
-resource_exists = None
-_distribution_finders = None
-_namespace_handlers = None
-_namespace_packages = None
-
-
-warnings.warn("pkg_resources is deprecated as an API", DeprecationWarning)
-
-
-_PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)
-
-
-class PEP440Warning(RuntimeWarning):
- """
- Used when there is an issue with a version or specifier not complying with
- PEP 440.
- """
-
-
-parse_version = packaging.version.Version
-
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
- globals().update(kw)
- _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
- state = {}
- g = globals()
- for k, v in _state_vars.items():
- state[k] = g['_sget_' + v](g[k])
- return state
-
-
-def __setstate__(state):
- g = globals()
- for k, v in state.items():
- g['_sset_' + _state_vars[k]](k, g[k], v)
- return state
-
-
-def _sget_dict(val):
- return val.copy()
-
-
-def _sset_dict(key, ob, state):
- ob.clear()
- ob.update(state)
-
-
-def _sget_object(val):
- return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
- ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
- """Return this platform's maximum compatible version.
-
- distutils.util.get_platform() normally reports the minimum version
- of macOS that would be required to *use* extensions produced by
- distutils. But what we want when checking compatibility is to know the
- version of macOS that we are *running*. To allow usage of packages that
- explicitly require a newer version of macOS, we must also know the
- current version of the OS.
-
- If this condition occurs for any other platform with a version in its
- platform strings, this function should be extended accordingly.
- """
- plat = get_build_platform()
- m = macosVersionString.match(plat)
- if m is not None and sys.platform == "darwin":
- try:
- plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
- except ValueError:
- # not macOS
- pass
- return plat
-
-
-__all__ = [
- # Basic resource access and distribution/entry point discovery
- 'require',
- 'run_script',
- 'get_provider',
- 'get_distribution',
- 'load_entry_point',
- 'get_entry_map',
- 'get_entry_info',
- 'iter_entry_points',
- 'resource_string',
- 'resource_stream',
- 'resource_filename',
- 'resource_listdir',
- 'resource_exists',
- 'resource_isdir',
- # Environmental control
- 'declare_namespace',
- 'working_set',
- 'add_activation_listener',
- 'find_distributions',
- 'set_extraction_path',
- 'cleanup_resources',
- 'get_default_cache',
- # Primary implementation classes
- 'Environment',
- 'WorkingSet',
- 'ResourceManager',
- 'Distribution',
- 'Requirement',
- 'EntryPoint',
- # Exceptions
- 'ResolutionError',
- 'VersionConflict',
- 'DistributionNotFound',
- 'UnknownExtra',
- 'ExtractionError',
- # Warnings
- 'PEP440Warning',
- # Parsing functions and string utilities
- 'parse_requirements',
- 'parse_version',
- 'safe_name',
- 'safe_version',
- 'get_platform',
- 'compatible_platforms',
- 'yield_lines',
- 'split_sections',
- 'safe_extra',
- 'to_filename',
- 'invalid_marker',
- 'evaluate_marker',
- # filesystem utilities
- 'ensure_directory',
- 'normalize_path',
- # Distribution "precedence" constants
- 'EGG_DIST',
- 'BINARY_DIST',
- 'SOURCE_DIST',
- 'CHECKOUT_DIST',
- 'DEVELOP_DIST',
- # "Provider" interfaces, implementations, and registration/lookup APIs
- 'IMetadataProvider',
- 'IResourceProvider',
- 'FileMetadata',
- 'PathMetadata',
- 'EggMetadata',
- 'EmptyProvider',
- 'empty_provider',
- 'NullProvider',
- 'EggProvider',
- 'DefaultProvider',
- 'ZipProvider',
- 'register_finder',
- 'register_namespace_handler',
- 'register_loader_type',
- 'fixup_namespace_packages',
- 'get_importer',
- # Warnings
- 'PkgResourcesDeprecationWarning',
- # Deprecated/backward compatibility only
- 'run_main',
- 'AvailableDistributions',
-]
-
-
-class ResolutionError(Exception):
- """Abstract base for dependency resolution errors"""
-
- def __repr__(self):
- return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
- """
- An already-installed version conflicts with the requested version.
-
- Should be initialized with the installed Distribution and the requested
- Requirement.
- """
-
- _template = "{self.dist} is installed but {self.req} is required"
-
- @property
- def dist(self):
- return self.args[0]
-
- @property
- def req(self):
- return self.args[1]
-
- def report(self):
- return self._template.format(**locals())
-
- def with_context(self, required_by):
- """
- If required_by is non-empty, return a version of self that is a
- ContextualVersionConflict.
- """
- if not required_by:
- return self
- args = self.args + (required_by,)
- return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
- """
- A VersionConflict that accepts a third parameter, the set of the
- requirements that required the installed Distribution.
- """
-
- _template = VersionConflict._template + ' by {self.required_by}'
-
- @property
- def required_by(self):
- return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
- """A requested distribution was not found"""
-
- _template = (
- "The '{self.req}' distribution was not found "
- "and is required by {self.requirers_str}"
- )
-
- @property
- def req(self):
- return self.args[0]
-
- @property
- def requirers(self):
- return self.args[1]
-
- @property
- def requirers_str(self):
- if not self.requirers:
- return 'the application'
- return ', '.join(self.requirers)
-
- def report(self):
- return self._template.format(**locals())
-
- def __str__(self):
- return self.report()
-
-
-class UnknownExtra(ResolutionError):
- """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
-PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
- """Register `provider_factory` to make providers for `loader_type`
-
- `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
- and `provider_factory` is a function that, passed a *module* object,
- returns an ``IResourceProvider`` for that module.
- """
- _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
- """Return an IResourceProvider for the named module or requirement"""
- if isinstance(moduleOrReq, Requirement):
- return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
- try:
- module = sys.modules[moduleOrReq]
- except KeyError:
- __import__(moduleOrReq)
- module = sys.modules[moduleOrReq]
- loader = getattr(module, '__loader__', None)
- return _find_adapter(_provider_factories, loader)(module)
-
-
-def _macos_vers(_cache=[]):
- if not _cache:
- version = platform.mac_ver()[0]
- # fallback for MacPorts
- if version == '':
- plist = '/System/Library/CoreServices/SystemVersion.plist'
- if os.path.exists(plist):
- if hasattr(plistlib, 'readPlist'):
- plist_content = plistlib.readPlist(plist)
- if 'ProductVersion' in plist_content:
- version = plist_content['ProductVersion']
-
- _cache.append(version.split('.'))
- return _cache[0]
-
-
-def _macos_arch(machine):
- return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
-
-def get_build_platform():
- """Return this platform's string for platform-specific distributions
-
- XXX Currently this is the same as ``distutils.util.get_platform()``, but it
- needs some hacks for Linux and macOS.
- """
- from sysconfig import get_platform
-
- plat = get_platform()
- if sys.platform == "darwin" and not plat.startswith('macosx-'):
- try:
- version = _macos_vers()
- machine = os.uname()[4].replace(" ", "_")
- return "macosx-%d.%d-%s" % (
- int(version[0]),
- int(version[1]),
- _macos_arch(machine),
- )
- except ValueError:
- # if someone is running a non-Mac darwin system, this will fall
- # through to the default implementation
- pass
- return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
- """Can code for the `provided` platform run on the `required` platform?
-
- Returns true if either platform is ``None``, or the platforms are equal.
-
- XXX Needs compatibility checks for Linux and other unixy OSes.
- """
- if provided is None or required is None or provided == required:
- # easy case
- return True
-
- # macOS special cases
- reqMac = macosVersionString.match(required)
- if reqMac:
- provMac = macosVersionString.match(provided)
-
- # is this a Mac package?
- if not provMac:
- # this is backwards compatibility for packages built before
- # setuptools 0.6. All packages built after this point will
- # use the new macOS designation.
- provDarwin = darwinVersionString.match(provided)
- if provDarwin:
- dversion = int(provDarwin.group(1))
- macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
- if (
- dversion == 7
- and macosversion >= "10.3"
- or dversion == 8
- and macosversion >= "10.4"
- ):
- return True
- # egg isn't macOS or legacy darwin
- return False
-
- # are they the same major version and machine type?
- if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
- return False
-
- # is the required OS major update >= the provided one?
- if int(provMac.group(2)) > int(reqMac.group(2)):
- return False
-
- return True
-
- # XXX Linux and other platforms' special cases should go here
- return False
-
-
-def run_script(dist_spec, script_name):
- """Locate distribution `dist_spec` and run its `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
- """Return a current distribution object for a Requirement or string"""
- if isinstance(dist, str):
- dist = Requirement.parse(dist)
- if isinstance(dist, Requirement):
- dist = get_provider(dist)
- if not isinstance(dist, Distribution):
- raise TypeError("Expected string, Requirement, or Distribution", dist)
- return dist
-
-
-def load_entry_point(dist, group, name):
- """Return `name` entry point of `group` for `dist` or raise ImportError"""
- return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
- def has_metadata(name):
- """Does the package's distribution contain the named metadata?"""
-
- def get_metadata(name):
- """The named metadata resource as a string"""
-
- def get_metadata_lines(name):
- """Yield named metadata resource as list of non-blank non-comment lines
-
- Leading and trailing whitespace is stripped from each line, and lines
- with ``#`` as the first non-blank character are omitted."""
-
- def metadata_isdir(name):
- """Is the named metadata a directory? (like ``os.path.isdir()``)"""
-
- def metadata_listdir(name):
- """List of metadata names in the directory (like ``os.listdir()``)"""
-
- def run_script(script_name, namespace):
- """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
- """An object that provides access to package resources"""
-
- def get_resource_filename(manager, resource_name):
- """Return a true filesystem path for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_stream(manager, resource_name):
- """Return a readable file-like object for `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def get_resource_string(manager, resource_name):
- """Return a string containing the contents of `resource_name`
-
- `manager` must be an ``IResourceManager``"""
-
- def has_resource(resource_name):
- """Does the package contain the named resource?"""
-
- def resource_isdir(resource_name):
- """Is the named resource a directory? (like ``os.path.isdir()``)"""
-
- def resource_listdir(resource_name):
- """List of resource names in the directory (like ``os.listdir()``)"""
-
-
-class WorkingSet:
- """A collection of active distributions on sys.path (or a similar list)"""
-
- def __init__(self, entries=None):
- """Create working set from list of path entries (default=sys.path)"""
- self.entries = []
- self.entry_keys = {}
- self.by_key = {}
- self.normalized_to_canonical_keys = {}
- self.callbacks = []
-
- if entries is None:
- entries = sys.path
-
- for entry in entries:
- self.add_entry(entry)
-
- @classmethod
- def _build_master(cls):
- """
- Prepare the master working set.
- """
- ws = cls()
- try:
- from __main__ import __requires__
- except ImportError:
- # The main program does not list any requirements
- return ws
-
- # ensure the requirements are met
- try:
- ws.require(__requires__)
- except VersionConflict:
- return cls._build_from_requirements(__requires__)
-
- return ws
-
- @classmethod
- def _build_from_requirements(cls, req_spec):
- """
- Build a working set from a requirement spec. Rewrites sys.path.
- """
- # try it without defaults already on sys.path
- # by starting with an empty path
- ws = cls([])
- reqs = parse_requirements(req_spec)
- dists = ws.resolve(reqs, Environment())
- for dist in dists:
- ws.add(dist)
-
- # add any missing entries from sys.path
- for entry in sys.path:
- if entry not in ws.entries:
- ws.add_entry(entry)
-
- # then copy back to sys.path
- sys.path[:] = ws.entries
- return ws
-
- def add_entry(self, entry):
- """Add a path item to ``.entries``, finding any distributions on it
-
- ``find_distributions(entry, True)`` is used to find distributions
- corresponding to the path entry, and they are added. `entry` is
- always appended to ``.entries``, even if it is already present.
- (This is because ``sys.path`` can contain the same value more than
- once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
- equal ``sys.path``.)
- """
- self.entry_keys.setdefault(entry, [])
- self.entries.append(entry)
- for dist in find_distributions(entry, True):
- self.add(dist, entry, False)
-
- def __contains__(self, dist):
- """True if `dist` is the active distribution for its project"""
- return self.by_key.get(dist.key) == dist
-
- def find(self, req):
- """Find a distribution matching requirement `req`
-
- If there is an active distribution for the requested project, this
- returns it as long as it meets the version requirement specified by
- `req`. But, if there is an active distribution for the project and it
- does *not* meet the `req` requirement, ``VersionConflict`` is raised.
- If there is no active distribution for the requested project, ``None``
- is returned.
- """
- dist = self.by_key.get(req.key)
-
- if dist is None:
- canonical_key = self.normalized_to_canonical_keys.get(req.key)
-
- if canonical_key is not None:
- req.key = canonical_key
- dist = self.by_key.get(canonical_key)
-
- if dist is not None and dist not in req:
- # XXX add more info
- raise VersionConflict(dist, req)
- return dist
-
- def iter_entry_points(self, group, name=None):
- """Yield entry point objects from `group` matching `name`
-
- If `name` is None, yields all entry points in `group` from all
- distributions in the working set, otherwise only ones matching
- both `group` and `name` are yielded (in distribution order).
- """
- return (
- entry
- for dist in self
- for entry in dist.get_entry_map(group).values()
- if name is None or name == entry.name
- )
-
- def run_script(self, requires, script_name):
- """Locate distribution for `requires` and run `script_name` script"""
- ns = sys._getframe(1).f_globals
- name = ns['__name__']
- ns.clear()
- ns['__name__'] = name
- self.require(requires)[0].run_script(script_name, ns)
-
- def __iter__(self):
- """Yield distributions for non-duplicate projects in the working set
-
- The yield order is the order in which the items' path entries were
- added to the working set.
- """
- seen = {}
- for item in self.entries:
- if item not in self.entry_keys:
- # workaround a cache issue
- continue
-
- for key in self.entry_keys[item]:
- if key not in seen:
- seen[key] = 1
- yield self.by_key[key]
-
- def add(self, dist, entry=None, insert=True, replace=False):
- """Add `dist` to working set, associated with `entry`
-
- If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
- On exit from this routine, `entry` is added to the end of the working
- set's ``.entries`` (if it wasn't already present).
-
- `dist` is only added to the working set if it's for a project that
- doesn't already have a distribution in the set, unless `replace=True`.
- If it's added, any callbacks registered with the ``subscribe()`` method
- will be called.
- """
- if insert:
- dist.insert_on(self.entries, entry, replace=replace)
-
- if entry is None:
- entry = dist.location
- keys = self.entry_keys.setdefault(entry, [])
- keys2 = self.entry_keys.setdefault(dist.location, [])
- if not replace and dist.key in self.by_key:
- # ignore hidden distros
- return
-
- self.by_key[dist.key] = dist
- normalized_name = packaging.utils.canonicalize_name(dist.key)
- self.normalized_to_canonical_keys[normalized_name] = dist.key
- if dist.key not in keys:
- keys.append(dist.key)
- if dist.key not in keys2:
- keys2.append(dist.key)
- self._added_new(dist)
-
- def resolve(
- self,
- requirements,
- env=None,
- installer=None,
- replace_conflicting=False,
- extras=None,
- ):
- """List all distributions needed to (recursively) meet `requirements`
-
- `requirements` must be a sequence of ``Requirement`` objects. `env`,
- if supplied, should be an ``Environment`` instance. If
- not supplied, it defaults to all distributions available within any
- entry or distribution in the working set. `installer`, if supplied,
- will be invoked with each requirement that cannot be met by an
- already-installed distribution; it should return a ``Distribution`` or
- ``None``.
-
- Unless `replace_conflicting=True`, raises a VersionConflict exception if
- any requirements are found on the path that have the correct name but
- the wrong version. Otherwise, if an `installer` is supplied it will be
- invoked to obtain the correct version of the requirement and activate
- it.
-
- `extras` is a list of the extras to be used with these requirements.
- This is important because extra requirements may look like `my_req;
- extra = "my_extra"`, which would otherwise be interpreted as a purely
- optional requirement. Instead, we want to be able to assert that these
- requirements are truly required.
- """
-
- # set up the stack
- requirements = list(requirements)[::-1]
- # set of processed requirements
- processed = {}
- # key -> dist
- best = {}
- to_activate = []
-
- req_extras = _ReqExtras()
-
- # Mapping of requirement to set of distributions that required it;
- # useful for reporting info about conflicts.
- required_by = collections.defaultdict(set)
-
- while requirements:
- # process dependencies breadth-first
- req = requirements.pop(0)
- if req in processed:
- # Ignore cyclic or redundant dependencies
- continue
-
- if not req_extras.markers_pass(req, extras):
- continue
-
- dist = self._resolve_dist(
- req, best, replace_conflicting, env, installer, required_by, to_activate
- )
-
- # push the new requirements onto the stack
- new_requirements = dist.requires(req.extras)[::-1]
- requirements.extend(new_requirements)
-
- # Register the new requirements needed by req
- for new_requirement in new_requirements:
- required_by[new_requirement].add(req.project_name)
- req_extras[new_requirement] = req.extras
-
- processed[req] = True
-
- # return list of distros to activate
- return to_activate
-
- def _resolve_dist(
- self, req, best, replace_conflicting, env, installer, required_by, to_activate
- ):
- dist = best.get(req.key)
- if dist is None:
- # Find the best distribution and add it to the map
- dist = self.by_key.get(req.key)
- if dist is None or (dist not in req and replace_conflicting):
- ws = self
- if env is None:
- if dist is None:
- env = Environment(self.entries)
- else:
- # Use an empty environment and workingset to avoid
- # any further conflicts with the conflicting
- # distribution
- env = Environment([])
- ws = WorkingSet([])
- dist = best[req.key] = env.best_match(
- req, ws, installer, replace_conflicting=replace_conflicting
- )
- if dist is None:
- requirers = required_by.get(req, None)
- raise DistributionNotFound(req, requirers)
- to_activate.append(dist)
- if dist not in req:
- # Oops, the "best" so far conflicts with a dependency
- dependent_req = required_by[req]
- raise VersionConflict(dist, req).with_context(dependent_req)
- return dist
-
- def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
- """Find all activatable distributions in `plugin_env`
-
- Example usage::
-
- distributions, errors = working_set.find_plugins(
- Environment(plugin_dirlist)
- )
- # add plugins+libs to sys.path
- map(working_set.add, distributions)
- # display errors
- print('Could not load', errors)
-
- The `plugin_env` should be an ``Environment`` instance that contains
- only distributions that are in the project's "plugin directory" or
- directories. The `full_env`, if supplied, should be an ``Environment``
- that contains all currently-available distributions. If `full_env` is not
- supplied, one is created automatically from the ``WorkingSet`` this
- method is called on, which will typically mean that every directory on
- ``sys.path`` will be scanned for distributions.
-
- `installer` is a standard installer callback as used by the
- ``resolve()`` method. The `fallback` flag indicates whether we should
- attempt to resolve older versions of a plugin if the newest version
- cannot be resolved.
-
- This method returns a 2-tuple: (`distributions`, `error_info`), where
- `distributions` is a list of the distributions found in `plugin_env`
- that were loadable, along with any other distributions that are needed
- to resolve their dependencies. `error_info` is a dictionary mapping
- unloadable plugin distributions to an exception instance describing the
- error that occurred. Usually this will be a ``DistributionNotFound`` or
- ``VersionConflict`` instance.
- """
-
- plugin_projects = list(plugin_env)
- # scan project names in alphabetic order
- plugin_projects.sort()
-
- error_info = {}
- distributions = {}
-
- if full_env is None:
- env = Environment(self.entries)
- env += plugin_env
- else:
- env = full_env + plugin_env
-
- shadow_set = self.__class__([])
- # put all our entries in shadow_set
- list(map(shadow_set.add, self))
-
- for project_name in plugin_projects:
- for dist in plugin_env[project_name]:
- req = [dist.as_requirement()]
-
- try:
- resolvees = shadow_set.resolve(req, env, installer)
-
- except ResolutionError as v:
- # save error info
- error_info[dist] = v
- if fallback:
- # try the next older version of project
- continue
- else:
- # give up on this project, keep going
- break
-
- else:
- list(map(shadow_set.add, resolvees))
- distributions.update(dict.fromkeys(resolvees))
-
- # success, no need to try any more versions of this project
- break
-
- distributions = list(distributions)
- distributions.sort()
-
- return distributions, error_info
-
- def require(self, *requirements):
- """Ensure that distributions matching `requirements` are activated
-
- `requirements` must be a string or a (possibly-nested) sequence
- thereof, specifying the distributions and versions required. The
- return value is a sequence of the distributions that needed to be
- activated to fulfill the requirements; all relevant distributions are
- included, even if they were already activated in this working set.
- """
- needed = self.resolve(parse_requirements(requirements))
-
- for dist in needed:
- self.add(dist)
-
- return needed
-
- def subscribe(self, callback, existing=True):
- """Invoke `callback` for all distributions
-
- If `existing=True` (default),
- call on all existing ones, as well.
- """
- if callback in self.callbacks:
- return
- self.callbacks.append(callback)
- if not existing:
- return
- for dist in self:
- callback(dist)
-
- def _added_new(self, dist):
- for callback in self.callbacks:
- callback(dist)
-
- def __getstate__(self):
- return (
- self.entries[:],
- self.entry_keys.copy(),
- self.by_key.copy(),
- self.normalized_to_canonical_keys.copy(),
- self.callbacks[:],
- )
-
- def __setstate__(self, e_k_b_n_c):
- entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
- self.entries = entries[:]
- self.entry_keys = keys.copy()
- self.by_key = by_key.copy()
- self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
- self.callbacks = callbacks[:]
-
-
-class _ReqExtras(dict):
- """
- Map each requirement to the extras that demanded it.
- """
-
- def markers_pass(self, req, extras=None):
- """
- Evaluate markers for req against each extra that
- demanded it.
-
- Return False if the req has a marker and fails
- evaluation. Otherwise, return True.
- """
- extra_evals = (
- req.marker.evaluate({'extra': extra})
- for extra in self.get(req, ()) + (extras or (None,))
- )
- return not req.marker or any(extra_evals)
-
-
-class Environment:
- """Searchable snapshot of distributions on a search path"""
-
- def __init__(
- self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR
- ):
- """Snapshot distributions available on a search path
-
- Any distributions found on `search_path` are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used.
-
- `platform` is an optional string specifying the name of the platform
- that platform-specific distributions must be compatible with. If
- unspecified, it defaults to the current platform. `python` is an
- optional string naming the desired version of Python (e.g. ``'3.6'``);
- it defaults to the current version.
-
- You may explicitly set `platform` (and/or `python`) to ``None`` if you
- wish to map *all* distributions, not just those compatible with the
- running platform or Python version.
- """
- self._distmap = {}
- self.platform = platform
- self.python = python
- self.scan(search_path)
-
- def can_add(self, dist):
- """Is distribution `dist` acceptable for this environment?
-
- The distribution must match the platform and python version
- requirements specified when this environment was created, or False
- is returned.
- """
- py_compat = (
- self.python is None
- or dist.py_version is None
- or dist.py_version == self.python
- )
- return py_compat and compatible_platforms(dist.platform, self.platform)
-
- def remove(self, dist):
- """Remove `dist` from the environment"""
- self._distmap[dist.key].remove(dist)
-
- def scan(self, search_path=None):
- """Scan `search_path` for distributions usable in this environment
-
- Any distributions found are added to the environment.
- `search_path` should be a sequence of ``sys.path`` items. If not
- supplied, ``sys.path`` is used. Only distributions conforming to
- the platform/python version defined at initialization are added.
- """
- if search_path is None:
- search_path = sys.path
-
- for item in search_path:
- for dist in find_distributions(item):
- self.add(dist)
-
- def __getitem__(self, project_name):
- """Return a newest-to-oldest list of distributions for `project_name`
-
- Uses case-insensitive `project_name` comparison, assuming all the
- project's distributions use their project's name converted to all
- lowercase as their key.
-
- """
- distribution_key = project_name.lower()
- return self._distmap.get(distribution_key, [])
-
- def add(self, dist):
- """Add `dist` if we ``can_add()`` it and it has not already been added"""
- if self.can_add(dist) and dist.has_version():
- dists = self._distmap.setdefault(dist.key, [])
- if dist not in dists:
- dists.append(dist)
- dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
- def best_match(self, req, working_set, installer=None, replace_conflicting=False):
- """Find distribution best matching `req` and usable on `working_set`
-
- This calls the ``find(req)`` method of the `working_set` to see if a
- suitable distribution is already active. (This may raise
- ``VersionConflict`` if an unsuitable version of the project is already
- active in the specified `working_set`.) If a suitable distribution
- isn't active, this method returns the newest distribution in the
- environment that meets the ``Requirement`` in `req`. If no suitable
- distribution is found, and `installer` is supplied, then the result of
- calling the environment's ``obtain(req, installer)`` method will be
- returned.
- """
- try:
- dist = working_set.find(req)
- except VersionConflict:
- if not replace_conflicting:
- raise
- dist = None
- if dist is not None:
- return dist
- for dist in self[req.key]:
- if dist in req:
- return dist
- # try to download/install
- return self.obtain(req, installer)
-
- def obtain(self, requirement, installer=None):
- """Obtain a distribution matching `requirement` (e.g. via download)
-
- Obtain a distro that matches requirement (e.g. via download). In the
- base ``Environment`` class, this routine just returns
- ``installer(requirement)``, unless `installer` is None, in which case
- None is returned instead. This method is a hook that allows subclasses
- to attempt other ways of obtaining a distribution before falling back
- to the `installer` argument."""
- if installer is not None:
- return installer(requirement)
-
- def __iter__(self):
- """Yield the unique project names of the available distributions"""
- for key in self._distmap.keys():
- if self[key]:
- yield key
-
- def __iadd__(self, other):
- """In-place addition of a distribution or environment"""
- if isinstance(other, Distribution):
- self.add(other)
- elif isinstance(other, Environment):
- for project in other:
- for dist in other[project]:
- self.add(dist)
- else:
- raise TypeError("Can't add %r to environment" % (other,))
- return self
-
- def __add__(self, other):
- """Add an environment or distribution to an environment"""
- new = self.__class__([], platform=None, python=None)
- for env in self, other:
- new += env
- return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
- """An error occurred extracting a resource
-
- The following attributes are available from instances of this exception:
-
- manager
- The resource manager that raised this exception
-
- cache_path
- The base directory for resource extraction
-
- original_error
- The exception instance that caused extraction to fail
- """
-
-
-class ResourceManager:
- """Manage resource extraction and packages"""
-
- extraction_path = None
-
- def __init__(self):
- self.cached_files = {}
-
- def resource_exists(self, package_or_requirement, resource_name):
- """Does the named resource exist?"""
- return get_provider(package_or_requirement).has_resource(resource_name)
-
- def resource_isdir(self, package_or_requirement, resource_name):
- """Is the named resource an existing directory?"""
- return get_provider(package_or_requirement).resource_isdir(resource_name)
-
- def resource_filename(self, package_or_requirement, resource_name):
- """Return a true filesystem path for specified resource"""
- return get_provider(package_or_requirement).get_resource_filename(
- self, resource_name
- )
-
- def resource_stream(self, package_or_requirement, resource_name):
- """Return a readable file-like object for specified resource"""
- return get_provider(package_or_requirement).get_resource_stream(
- self, resource_name
- )
-
- def resource_string(self, package_or_requirement, resource_name):
- """Return specified resource as a string"""
- return get_provider(package_or_requirement).get_resource_string(
- self, resource_name
- )
-
- def resource_listdir(self, package_or_requirement, resource_name):
- """List the contents of the named resource directory"""
- return get_provider(package_or_requirement).resource_listdir(resource_name)
-
- def extraction_error(self):
- """Give an error message for problems extracting file(s)"""
-
- old_exc = sys.exc_info()[1]
- cache_path = self.extraction_path or get_default_cache()
-
- tmpl = textwrap.dedent(
- """
- Can't extract file(s) to egg cache
-
- The following error occurred while trying to extract file(s)
- to the Python egg cache:
-
- {old_exc}
-
- The Python egg cache directory is currently set to:
-
- {cache_path}
-
- Perhaps your account does not have write access to this directory?
- You can change the cache directory by setting the PYTHON_EGG_CACHE
- environment variable to point to an accessible directory.
- """
- ).lstrip()
- err = ExtractionError(tmpl.format(**locals()))
- err.manager = self
- err.cache_path = cache_path
- err.original_error = old_exc
- raise err
-
- def get_cache_path(self, archive_name, names=()):
- """Return absolute location in cache for `archive_name` and `names`
-
- The parent directory of the resulting path will be created if it does
- not already exist. `archive_name` should be the base filename of the
- enclosing egg (which may not be the name of the enclosing zipfile!),
- including its ".egg" extension. `names`, if provided, should be a
- sequence of path name parts "under" the egg's extraction location.
-
- This method should only be called by resource providers that need to
- obtain an extraction location, and only for names they intend to
- extract, as it tracks the generated names for possible cleanup later.
- """
- extract_path = self.extraction_path or get_default_cache()
- target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
- try:
- _bypass_ensure_directory(target_path)
- except Exception:
- self.extraction_error()
-
- self._warn_unsafe_extraction_path(extract_path)
-
- self.cached_files[target_path] = 1
- return target_path
-
- @staticmethod
- def _warn_unsafe_extraction_path(path):
- """
- If the default extraction path is overridden and set to an insecure
- location, such as /tmp, it opens up an opportunity for an attacker to
- replace an extracted file with an unauthorized payload. Warn the user
- if a known insecure location is used.
-
- See Distribute #375 for more details.
- """
- if os.name == 'nt' and not path.startswith(os.environ['windir']):
- # On Windows, permissions are generally restrictive by default
- # and temp directories are not writable by other users, so
- # bypass the warning.
- return
- mode = os.stat(path).st_mode
- if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
- msg = (
- "Extraction path is writable by group/others "
- "and vulnerable to attack when "
- "used with get_resource_filename ({path}). "
- "Consider a more secure "
- "location (set with .set_extraction_path or the "
- "PYTHON_EGG_CACHE environment variable)."
- ).format(**locals())
- warnings.warn(msg, UserWarning)
-
- def postprocess(self, tempname, filename):
- """Perform any platform-specific postprocessing of `tempname`
-
- This is where Mac header rewrites should be done; other platforms don't
- have anything special they should do.
-
- Resource providers should call this method ONLY after successfully
- extracting a compressed resource. They must NOT call it on resources
- that are already in the filesystem.
-
- `tempname` is the current (temporary) name of the file, and `filename`
- is the name it will be renamed to by the caller after this routine
- returns.
- """
-
- if os.name == 'posix':
- # Make the resource executable
- mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
- os.chmod(tempname, mode)
-
- def set_extraction_path(self, path):
- """Set the base path where resources will be extracted to, if needed.
-
- If you do not call this routine before any extractions take place, the
- path defaults to the return value of ``get_default_cache()``. (Which
- is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
- platform-specific fallbacks. See that routine's documentation for more
- details.)
-
- Resources are extracted to subdirectories of this path based upon
- information given by the ``IResourceProvider``. You may set this to a
- temporary directory, but then you must call ``cleanup_resources()`` to
- delete the extracted files when done. There is no guarantee that
- ``cleanup_resources()`` will be able to remove all extracted files.
-
- (Note: you may not change the extraction path for a given resource
- manager once resources have been extracted, unless you first call
- ``cleanup_resources()``.)
- """
- if self.cached_files:
- raise ValueError("Can't change extraction path, files already extracted")
-
- self.extraction_path = path
-
- def cleanup_resources(self, force=False):
- """
- Delete all extracted resource files and directories, returning a list
- of the file and directory names that could not be successfully removed.
- This function does not have any concurrency protection, so it should
- generally only be called when the extraction path is a temporary
- directory exclusive to a single process. This method is not
- automatically called; you must call it explicitly or register it as an
- ``atexit`` function if you wish to ensure cleanup of a temporary
- directory used for extractions.
- """
- # XXX
-
-
-def get_default_cache():
- """
- Return the ``PYTHON_EGG_CACHE`` environment variable
- or a platform-relevant user cache dir for an app
- named "Python-Eggs".
- """
- return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir(
- appname='Python-Eggs'
- )
-
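-# Illustrative sketch (hypothetical path): when set, the environment variable
-# takes precedence over the platformdirs fallback:
-#
-#   os.environ['PYTHON_EGG_CACHE'] = '/path/to/egg-cache'
-#   get_default_cache()  # -> '/path/to/egg-cache'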
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """
- Convert an arbitrary string to a standard version string
- """
- try:
- # normalize the version
- return str(packaging.version.Version(version))
- except packaging.version.InvalidVersion:
- version = version.replace(' ', '.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
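-# Illustrative sketch of the escaping behavior (hypothetical inputs):
-#
-#   safe_name('zope.interface_fork')  # -> 'zope.interface-fork'
-#   safe_version('1.2.3')             # -> '1.2.3' (already PEP 440 compliant)
-#   safe_version('1.2 beta')          # -> '1.2.beta' (escaping fallback)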
-
-def _forgiving_version(version):
- """Fallback when ``safe_version`` is not safe enough
- >>> parse_version(_forgiving_version('0.23ubuntu1'))
-
- >>> parse_version(_forgiving_version('0.23-'))
-
- >>> parse_version(_forgiving_version('0.-_'))
-
- >>> parse_version(_forgiving_version('42.+?1'))
-
- >>> parse_version(_forgiving_version('hello world'))
-
- """
- version = version.replace(' ', '.')
- match = _PEP440_FALLBACK.search(version)
- if match:
- safe = match["safe"]
- rest = version[len(safe):]
- else:
- safe = "0"
- rest = version
- local = f"sanitized.{_safe_segment(rest)}".strip(".")
- return f"{safe}.dev0+{local}"
-
-
-def _safe_segment(segment):
- """Convert an arbitrary string into a safe segment"""
- segment = re.sub('[^A-Za-z0-9.]+', '-', segment)
- segment = re.sub('-[^A-Za-z0-9]+', '-', segment)
- return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-")
-
-
-def safe_extra(extra):
- """Convert an arbitrary string to a standard 'extra' name
-
- Any runs of non-alphanumeric characters are replaced with a single '_',
- and the result is always lowercased.
- """
- return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-', '_')
-
-
-def invalid_marker(text):
- """
- Validate text as a PEP 508 environment marker; return an exception
- if invalid or False otherwise.
- """
- try:
- evaluate_marker(text)
- except SyntaxError as e:
- e.filename = None
- e.lineno = None
- return e
- return False
-
-
-def evaluate_marker(text, extra=None):
- """
- Evaluate a PEP 508 environment marker.
- Return a boolean indicating the marker result in this environment.
- Raise SyntaxError if marker is invalid.
-
- This implementation uses the 'pyparsing' module.
- """
- try:
- marker = packaging.markers.Marker(text)
- return marker.evaluate()
- except packaging.markers.InvalidMarker as e:
- raise SyntaxError(e) from e
-
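-# Illustrative sketch: markers follow PEP 508, e.g.
-#
-#   evaluate_marker('python_version >= "3.8"')  # True on Python 3.8 and newer
-#   invalid_marker('python_version >= "3.8"')   # False (the marker is valid)
-#   invalid_marker('not a marker')              # a SyntaxError instance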
-
-class NullProvider:
- """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
- egg_name = None
- egg_info = None
- loader = None
-
- def __init__(self, module):
- self.loader = getattr(module, '__loader__', None)
- self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
- def get_resource_filename(self, manager, resource_name):
- return self._fn(self.module_path, resource_name)
-
- def get_resource_stream(self, manager, resource_name):
- return io.BytesIO(self.get_resource_string(manager, resource_name))
-
- def get_resource_string(self, manager, resource_name):
- return self._get(self._fn(self.module_path, resource_name))
-
- def has_resource(self, resource_name):
- return self._has(self._fn(self.module_path, resource_name))
-
- def _get_metadata_path(self, name):
- return self._fn(self.egg_info, name)
-
- def has_metadata(self, name):
- if not self.egg_info:
- return self.egg_info
-
- path = self._get_metadata_path(name)
- return self._has(path)
-
- def get_metadata(self, name):
- if not self.egg_info:
- return ""
- path = self._get_metadata_path(name)
- value = self._get(path)
- try:
- return value.decode('utf-8')
- except UnicodeDecodeError as exc:
- # Include the path in the error message to simplify
- # troubleshooting, and without changing the exception type.
- exc.reason += ' in {} file at path: {}'.format(name, path)
- raise
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
- def resource_isdir(self, resource_name):
- return self._isdir(self._fn(self.module_path, resource_name))
-
- def metadata_isdir(self, name):
- return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
- def resource_listdir(self, resource_name):
- return self._listdir(self._fn(self.module_path, resource_name))
-
- def metadata_listdir(self, name):
- if self.egg_info:
- return self._listdir(self._fn(self.egg_info, name))
- return []
-
- def run_script(self, script_name, namespace):
- script = 'scripts/' + script_name
- if not self.has_metadata(script):
- raise ResolutionError(
- "Script {script!r} not found in metadata at {self.egg_info!r}".format(
- **locals()
- ),
- )
- script_text = self.get_metadata(script).replace('\r\n', '\n')
- script_text = script_text.replace('\r', '\n')
- script_filename = self._fn(self.egg_info, script)
- namespace['__file__'] = script_filename
- if os.path.exists(script_filename):
- with open(script_filename) as fid:
- source = fid.read()
- code = compile(source, script_filename, 'exec')
- exec(code, namespace, namespace)
- else:
- from linecache import cache
-
- cache[script_filename] = (
- len(script_text),
- 0,
- script_text.split('\n'),
- script_filename,
- )
- script_code = compile(script_text, script_filename, 'exec')
- exec(script_code, namespace, namespace)
-
- def _has(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _isdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _listdir(self, path):
- raise NotImplementedError(
- "Can't perform this operation for unregistered loader type"
- )
-
- def _fn(self, base, resource_name):
- self._validate_resource_path(resource_name)
- if resource_name:
- return os.path.join(base, *resource_name.split('/'))
- return base
-
- @staticmethod
- def _validate_resource_path(path):
- """
- Validate the resource paths according to the docs.
- https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
-
- >>> warned = getfixture('recwarn')
- >>> warnings.simplefilter('always')
- >>> vrp = NullProvider._validate_resource_path
- >>> vrp('foo/bar.txt')
- >>> bool(warned)
- False
- >>> vrp('../foo/bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('/foo/bar.txt')
- >>> bool(warned)
- True
- >>> vrp('foo/../../bar.txt')
- >>> bool(warned)
- True
- >>> warned.clear()
- >>> vrp('foo/f../bar.txt')
- >>> bool(warned)
- False
-
- Windows path separators are straight-up disallowed.
- >>> vrp(r'\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- >>> vrp(r'C:\\foo/bar.txt')
- Traceback (most recent call last):
- ...
- ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
- Blank values are allowed
-
- >>> vrp('')
- >>> bool(warned)
- False
-
- Non-string values are not.
-
- >>> vrp(None)
- Traceback (most recent call last):
- ...
- AttributeError: ...
- """
- invalid = (
- os.path.pardir in path.split(posixpath.sep)
- or posixpath.isabs(path)
- or ntpath.isabs(path)
- )
- if not invalid:
- return
-
- msg = "Use of .. or absolute path in a resource path is not allowed."
-
- # Aggressively disallow Windows absolute paths
- if ntpath.isabs(path) and not posixpath.isabs(path):
- raise ValueError(msg)
-
- # for compatibility, warn; in future
- # raise ValueError(msg)
- warnings.warn(
- msg[:-1] + " and will raise exceptions in a future release.",
- DeprecationWarning,
- stacklevel=4,
- )
-
- def _get(self, path):
- if hasattr(self.loader, 'get_data'):
- return self.loader.get_data(path)
- raise NotImplementedError(
- "Can't perform this operation for loaders without 'get_data()'"
- )
-
-
-register_loader_type(object, NullProvider)
-
-
-def _parents(path):
- """
- yield all parents of path including path
- """
- last = None
- while path != last:
- yield path
- last = path
- path, _ = os.path.split(path)
-
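-# Illustrative sketch, assuming a POSIX-style path:
-#
-#   list(_parents('/eggs/foo.egg/bar'))
-#   # -> ['/eggs/foo.egg/bar', '/eggs/foo.egg', '/eggs', '/']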
-
-class EggProvider(NullProvider):
- """Provider based on a virtual filesystem"""
-
- def __init__(self, module):
- super().__init__(module)
- self._setup_prefix()
-
- def _setup_prefix(self):
- # Assume that metadata may be nested inside a "basket"
- # of multiple eggs and use module_path instead of .archive.
- eggs = filter(_is_egg_path, _parents(self.module_path))
- egg = next(eggs, None)
- egg and self._set_egg(egg)
-
- def _set_egg(self, path):
- self.egg_name = os.path.basename(path)
- self.egg_info = os.path.join(path, 'EGG-INFO')
- self.egg_root = path
-
-
-class DefaultProvider(EggProvider):
- """Provides access to package resources in the filesystem"""
-
- def _has(self, path):
- return os.path.exists(path)
-
- def _isdir(self, path):
- return os.path.isdir(path)
-
- def _listdir(self, path):
- return os.listdir(path)
-
- def get_resource_stream(self, manager, resource_name):
- return open(self._fn(self.module_path, resource_name), 'rb')
-
- def _get(self, path):
- with open(path, 'rb') as stream:
- return stream.read()
-
- @classmethod
- def _register(cls):
- loader_names = (
- 'SourceFileLoader',
- 'SourcelessFileLoader',
- )
- for name in loader_names:
- loader_cls = getattr(importlib_machinery, name, type(None))
- register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
- """Provider that returns nothing for all requests"""
-
- module_path = None
-
- _isdir = _has = lambda self, path: False
-
- def _get(self, path):
- return ''
-
- def _listdir(self, path):
- return []
-
- def __init__(self):
- pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
- """
- zip manifest builder
- """
-
- @classmethod
- def build(cls, path):
- """
- Build a dictionary similar to the zipimport directory
- caches, except instead of tuples, store ZipInfo objects.
-
- Use a platform-specific path separator (os.sep) for the path keys
- for compatibility with pypy on Windows.
- """
- with zipfile.ZipFile(path) as zfile:
- items = (
- (
- name.replace('/', os.sep),
- zfile.getinfo(name),
- )
- for name in zfile.namelist()
- )
- return dict(items)
-
- load = build
-
-
-class MemoizedZipManifests(ZipManifests):
- """
- Memoized zipfile manifests.
- """
-
- manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
- def load(self, path):
- """
- Load a manifest at path or return a suitable manifest already loaded.
- """
- path = os.path.normpath(path)
- mtime = os.stat(path).st_mtime
-
- if path not in self or self[path].mtime != mtime:
- manifest = self.build(path)
- self[path] = self.manifest_mod(manifest, mtime)
-
- return self[path].manifest
-
-
-class ZipProvider(EggProvider):
- """Resource support for zips and eggs"""
-
- eagers = None
- _zip_manifests = MemoizedZipManifests()
-
- def __init__(self, module):
- super().__init__(module)
- self.zip_pre = self.loader.archive + os.sep
-
- def _zipinfo_name(self, fspath):
- # Convert a virtual filename (full path to file) into a zipfile subpath
- # usable with the zipimport directory cache for our target archive
- fspath = fspath.rstrip(os.sep)
- if fspath == self.loader.archive:
- return ''
- if fspath.startswith(self.zip_pre):
- return fspath[len(self.zip_pre) :]
- raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre))
-
- def _parts(self, zip_path):
- # Convert a zipfile subpath into an egg-relative path part list.
- # pseudo-fs path
- fspath = self.zip_pre + zip_path
- if fspath.startswith(self.egg_root + os.sep):
- return fspath[len(self.egg_root) + 1 :].split(os.sep)
- raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root))
-
- @property
- def zipinfo(self):
- return self._zip_manifests.load(self.loader.archive)
-
- def get_resource_filename(self, manager, resource_name):
- if not self.egg_name:
- raise NotImplementedError(
- "resource_filename() only supported for .egg, not .zip"
- )
- # no need to lock for extraction, since we use temp names
- zip_path = self._resource_to_zip(resource_name)
- eagers = self._get_eager_resources()
- if '/'.join(self._parts(zip_path)) in eagers:
- for name in eagers:
- self._extract_resource(manager, self._eager_to_zip(name))
- return self._extract_resource(manager, zip_path)
-
- @staticmethod
- def _get_date_and_size(zip_stat):
- size = zip_stat.file_size
- # ymdhms+wday, yday, dst
- date_time = zip_stat.date_time + (0, 0, -1)
- # 1980 offset already done
- timestamp = time.mktime(date_time)
- return timestamp, size
-
- # FIXME: 'ZipProvider._extract_resource' is too complex (12)
- def _extract_resource(self, manager, zip_path): # noqa: C901
- if zip_path in self._index():
- for name in self._index()[zip_path]:
- last = self._extract_resource(manager, os.path.join(zip_path, name))
- # return the extracted directory name
- return os.path.dirname(last)
-
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
- if not WRITE_SUPPORT:
- raise IOError(
- '"os.rename" and "os.unlink" are not supported ' 'on this platform'
- )
- try:
- real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
-
- if self._is_current(real_path, zip_path):
- return real_path
-
- outf, tmpnam = _mkstemp(
- ".$extract",
- dir=os.path.dirname(real_path),
- )
- os.write(outf, self.loader.get_data(zip_path))
- os.close(outf)
- utime(tmpnam, (timestamp, timestamp))
- manager.postprocess(tmpnam, real_path)
-
- try:
- rename(tmpnam, real_path)
-
- except os.error:
- if os.path.isfile(real_path):
- if self._is_current(real_path, zip_path):
- # the file became current since it was checked above,
- # so proceed.
- return real_path
- # Windows, del old file and retry
- elif os.name == 'nt':
- unlink(real_path)
- rename(tmpnam, real_path)
- return real_path
- raise
-
- except os.error:
- # report a user-friendly error
- manager.extraction_error()
-
- return real_path
-
- def _is_current(self, file_path, zip_path):
- """
- Return True if the file_path is current for this zip_path
- """
- timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
- if not os.path.isfile(file_path):
- return False
- stat = os.stat(file_path)
- if stat.st_size != size or stat.st_mtime != timestamp:
- return False
- # check that the contents match
- zip_contents = self.loader.get_data(zip_path)
- with open(file_path, 'rb') as f:
- file_contents = f.read()
- return zip_contents == file_contents
-
- def _get_eager_resources(self):
- if self.eagers is None:
- eagers = []
- for name in ('native_libs.txt', 'eager_resources.txt'):
- if self.has_metadata(name):
- eagers.extend(self.get_metadata_lines(name))
- self.eagers = eagers
- return self.eagers
-
- def _index(self):
- try:
- return self._dirindex
- except AttributeError:
- ind = {}
- for path in self.zipinfo:
- parts = path.split(os.sep)
- while parts:
- parent = os.sep.join(parts[:-1])
- if parent in ind:
- ind[parent].append(parts[-1])
- break
- else:
- ind[parent] = [parts.pop()]
- self._dirindex = ind
- return ind
-
- def _has(self, fspath):
- zip_path = self._zipinfo_name(fspath)
- return zip_path in self.zipinfo or zip_path in self._index()
-
- def _isdir(self, fspath):
- return self._zipinfo_name(fspath) in self._index()
-
- def _listdir(self, fspath):
- return list(self._index().get(self._zipinfo_name(fspath), ()))
-
- def _eager_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
- def _resource_to_zip(self, resource_name):
- return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
- """Metadata handler for standalone PKG-INFO files
-
- Usage::
-
- metadata = FileMetadata("/path/to/PKG-INFO")
-
- This provider rejects all data and metadata requests except for PKG-INFO,
- which is treated as existing, and will be the contents of the file at
- the provided location.
- """
-
- def __init__(self, path):
- self.path = path
-
- def _get_metadata_path(self, name):
- return self.path
-
- def has_metadata(self, name):
- return name == 'PKG-INFO' and os.path.isfile(self.path)
-
- def get_metadata(self, name):
- if name != 'PKG-INFO':
- raise KeyError("No metadata except PKG-INFO is available")
-
- with io.open(self.path, encoding='utf-8', errors="replace") as f:
- metadata = f.read()
- self._warn_on_replacement(metadata)
- return metadata
-
- def _warn_on_replacement(self, metadata):
- replacement_char = '�'
- if replacement_char in metadata:
- tmpl = "{self.path} could not be properly decoded in UTF-8"
- msg = tmpl.format(**locals())
- warnings.warn(msg)
-
- def get_metadata_lines(self, name):
- return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
- """Metadata provider for egg directories
-
- Usage::
-
- # Development eggs:
-
- egg_info = "/path/to/PackageName.egg-info"
- base_dir = os.path.dirname(egg_info)
- metadata = PathMetadata(base_dir, egg_info)
- dist_name = os.path.splitext(os.path.basename(egg_info))[0]
-        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
-
- # Unpacked egg directories:
-
- egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
- metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
- dist = Distribution.from_filename(egg_path, metadata=metadata)
- """
-
- def __init__(self, path, egg_info):
- self.module_path = path
- self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
- """Metadata provider for .egg files"""
-
- def __init__(self, importer):
- """Create a metadata provider from a zipimporter"""
-
- self.zip_pre = importer.archive + os.sep
- self.loader = importer
- if importer.prefix:
- self.module_path = os.path.join(importer.archive, importer.prefix)
- else:
- self.module_path = importer.archive
- self._setup_prefix()
-
-
-_declare_state('dict', _distribution_finders={})
-
-
-def register_finder(importer_type, distribution_finder):
- """Register `distribution_finder` to find distributions in sys.path items
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `distribution_finder` is a callable that, passed a path
- item and the importer instance, yields ``Distribution`` instances found on
- that path item. See ``pkg_resources.find_on_path`` for an example."""
- _distribution_finders[importer_type] = distribution_finder
-
-
-def find_distributions(path_item, only=False):
- """Yield distributions accessible via `path_item`"""
- importer = get_importer(path_item)
- finder = _find_adapter(_distribution_finders, importer)
- return finder(importer, path_item, only)
-
-
-def find_eggs_in_zip(importer, path_item, only=False):
- """
- Find eggs in zip files; possibly multiple nested eggs.
- """
- if importer.archive.endswith('.whl'):
- # wheels are not supported with this finder
- # they don't have PKG-INFO metadata, and won't ever contain eggs
- return
- metadata = EggMetadata(importer)
- if metadata.has_metadata('PKG-INFO'):
- yield Distribution.from_filename(path_item, metadata=metadata)
- if only:
- # don't yield nested distros
- return
- for subitem in metadata.resource_listdir(''):
- if _is_egg_path(subitem):
- subpath = os.path.join(path_item, subitem)
- dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
- for dist in dists:
- yield dist
- elif subitem.lower().endswith(('.dist-info', '.egg-info')):
- subpath = os.path.join(path_item, subitem)
- submeta = EggMetadata(zipimport.zipimporter(subpath))
- submeta.egg_info = subpath
- yield Distribution.from_location(path_item, subitem, submeta)
-
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-
-def find_nothing(importer, path_item, only=False):
- return ()
-
-
-register_finder(object, find_nothing)
-
-
-def find_on_path(importer, path_item, only=False):
- """Yield distributions accessible on a sys.path directory"""
- path_item = _normalize_cached(path_item)
-
- if _is_unpacked_egg(path_item):
- yield Distribution.from_filename(
- path_item,
- metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')),
- )
- return
-
- entries = (os.path.join(path_item, child) for child in safe_listdir(path_item))
-
- # scan for .egg and .egg-info in directory
- for entry in sorted(entries):
- fullpath = os.path.join(path_item, entry)
- factory = dist_factory(path_item, entry, only)
- for dist in factory(fullpath):
- yield dist
-
-
-def dist_factory(path_item, entry, only):
- """Return a dist_factory for the given entry."""
- lower = entry.lower()
- is_egg_info = lower.endswith('.egg-info')
- is_dist_info = lower.endswith('.dist-info') and os.path.isdir(
- os.path.join(path_item, entry)
- )
- is_meta = is_egg_info or is_dist_info
- return (
- distributions_from_metadata
- if is_meta
- else find_distributions
- if not only and _is_egg_path(entry)
- else resolve_egg_link
- if not only and lower.endswith('.egg-link')
- else NoDists()
- )
-
-
-class NoDists:
- """
- >>> bool(NoDists())
- False
-
- >>> list(NoDists()('anything'))
- []
- """
-
- def __bool__(self):
- return False
-
- def __call__(self, fullpath):
- return iter(())
-
-
-def safe_listdir(path):
- """
- Attempt to list contents of path, but suppress some exceptions.
- """
- try:
- return os.listdir(path)
- except (PermissionError, NotADirectoryError):
- pass
- except OSError as e:
-        # Ignore the directory if it does not exist, is not a directory,
-        # or permission is denied
- if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
- raise
- return ()
-
-
-def distributions_from_metadata(path):
- root = os.path.dirname(path)
- if os.path.isdir(path):
- if len(os.listdir(path)) == 0:
- # empty metadata dir; skip
- return
- metadata = PathMetadata(root, path)
- else:
- metadata = FileMetadata(path)
- entry = os.path.basename(path)
- yield Distribution.from_location(
- root,
- entry,
- metadata,
- precedence=DEVELOP_DIST,
- )
-
-
-def non_empty_lines(path):
- """
- Yield non-empty lines from file at path
- """
- with open(path) as f:
- for line in f:
- line = line.strip()
- if line:
- yield line
-
-
-def resolve_egg_link(path):
- """
- Given a path to an .egg-link, resolve distributions
- present in the referenced path.
- """
- referenced_paths = non_empty_lines(path)
- resolved_paths = (
- os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
- )
- dist_groups = map(find_distributions, resolved_paths)
- return next(dist_groups, ())
-
-
-if hasattr(pkgutil, 'ImpImporter'):
- register_finder(pkgutil.ImpImporter, find_on_path)
-
-register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
- """Register `namespace_handler` to declare namespace packages
-
- `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
- handler), and `namespace_handler` is a callable like this::
-
- def namespace_handler(importer, path_entry, moduleName, module):
- # return a path_entry to use for child packages
-
- Namespace handlers are only called if the importer object has already
- agreed that it can handle the relevant path item, and they should only
- return a subpath if the module __path__ does not already contain an
- equivalent subpath. For an example namespace handler, see
- ``pkg_resources.file_ns_handler``.
- """
- _namespace_handlers[importer_type] = namespace_handler
-
-
-def _handle_ns(packageName, path_item):
- """Ensure that named package includes a subpath of path_item (if needed)"""
-
- importer = get_importer(path_item)
- if importer is None:
- return None
-
- # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
- try:
- spec = importer.find_spec(packageName)
- except AttributeError:
- # capture warnings due to #1111
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- loader = importer.find_module(packageName)
- else:
- loader = spec.loader if spec else None
-
- if loader is None:
- return None
- module = sys.modules.get(packageName)
- if module is None:
- module = sys.modules[packageName] = types.ModuleType(packageName)
- module.__path__ = []
- _set_parent_ns(packageName)
- elif not hasattr(module, '__path__'):
- raise TypeError("Not a package:", packageName)
- handler = _find_adapter(_namespace_handlers, importer)
- subpath = handler(importer, path_item, packageName, module)
- if subpath is not None:
- path = module.__path__
- path.append(subpath)
- importlib.import_module(packageName)
- _rebuild_mod_path(path, packageName, module)
- return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
- """
- Rebuild module.__path__ ensuring that all entries are ordered
- corresponding to their sys.path order
- """
- sys_path = [_normalize_cached(p) for p in sys.path]
-
- def safe_sys_path_index(entry):
- """
- Workaround for #520 and #513.
- """
- try:
- return sys_path.index(entry)
- except ValueError:
- return float('inf')
-
- def position_in_sys_path(path):
- """
- Return the ordinal of the path based on its position in sys.path
- """
- path_parts = path.split(os.sep)
- module_parts = package_name.count('.') + 1
- parts = path_parts[:-module_parts]
- return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
- new_path = sorted(orig_path, key=position_in_sys_path)
- new_path = [_normalize_cached(p) for p in new_path]
-
- if isinstance(module.__path__, list):
- module.__path__[:] = new_path
- else:
- module.__path__ = new_path
-
-
-def declare_namespace(packageName):
- """Declare that package 'packageName' is a namespace package"""
-
- msg = (
- f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n"
- "Implementing implicit namespace packages (as specified in PEP 420) "
- "is preferred to `pkg_resources.declare_namespace`. "
- "See https://setuptools.pypa.io/en/latest/references/"
- "keywords.html#keyword-namespace-packages"
- )
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
-
- _imp.acquire_lock()
- try:
- if packageName in _namespace_packages:
- return
-
- path = sys.path
- parent, _, _ = packageName.rpartition('.')
-
- if parent:
- declare_namespace(parent)
- if parent not in _namespace_packages:
- __import__(parent)
- try:
- path = sys.modules[parent].__path__
- except AttributeError as e:
- raise TypeError("Not a package:", parent) from e
-
- # Track what packages are namespaces, so when new path items are added,
- # they can be updated
- _namespace_packages.setdefault(parent or None, []).append(packageName)
- _namespace_packages.setdefault(packageName, [])
-
- for path_item in path:
- # Ensure all the parent's path items are reflected in the child,
- # if they apply
- _handle_ns(packageName, path_item)
-
- finally:
- _imp.release_lock()
-
-
-def fixup_namespace_packages(path_item, parent=None):
- """Ensure that previously-declared namespace packages include path_item"""
- _imp.acquire_lock()
- try:
- for package in _namespace_packages.get(parent, ()):
- subpath = _handle_ns(package, path_item)
- if subpath:
- fixup_namespace_packages(subpath, package)
- finally:
- _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
- """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
- subpath = os.path.join(path_item, packageName.split('.')[-1])
- normalized = _normalize_cached(subpath)
- for item in module.__path__:
- if _normalize_cached(item) == normalized:
- break
- else:
- # Only return the path if it's not already there
- return subpath
-
-
-if hasattr(pkgutil, 'ImpImporter'):
- register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
- return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
- """Normalize a file/dir name for comparison purposes"""
- return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
-
-
-def _cygwin_patch(filename): # pragma: nocover
- """
-    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
-    symlink components. Using os.path.abspath() works around
-    this limitation. A fix in os.getcwd() would probably be better,
-    on Cygwin even more so, except that this seems to be by design...
- """
- return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-
-
-def _normalize_cached(filename, _cache={}):
- try:
- return _cache[filename]
- except KeyError:
- _cache[filename] = result = normalize_path(filename)
- return result
-
-
-def _is_egg_path(path):
- """
- Determine if given path appears to be an egg.
- """
- return _is_zip_egg(path) or _is_unpacked_egg(path)
-
-
-def _is_zip_egg(path):
- return (
- path.lower().endswith('.egg')
- and os.path.isfile(path)
- and zipfile.is_zipfile(path)
- )
-
-
-def _is_unpacked_egg(path):
- """
- Determine if given path appears to be an unpacked egg.
- """
- return path.lower().endswith('.egg') and os.path.isfile(
- os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- )
-
-
-def _set_parent_ns(packageName):
- parts = packageName.split('.')
- name = parts.pop()
- if parts:
- parent = '.'.join(parts)
- setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
- r"""
-    (?P<name>[^-]+) (
-        -(?P<ver>[^-]+) (
-            -py(?P<pyver>[^-]+) (
-                -(?P<plat>.+)
- )?
- )?
- )?
- """,
- re.VERBOSE | re.IGNORECASE,
-).match
-
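-# Illustrative sketch (hypothetical egg basename):
-#
-#   m = EGG_NAME('FooPkg-1.2-py3.8-linux_x86_64')
-#   m.group('name', 'ver', 'pyver', 'plat')
-#   # -> ('FooPkg', '1.2', '3.8', 'linux_x86_64')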
-
-class EntryPoint:
- """Object representing an advertised importable object"""
-
- def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
- if not MODULE(module_name):
- raise ValueError("Invalid module name", module_name)
- self.name = name
- self.module_name = module_name
- self.attrs = tuple(attrs)
- self.extras = tuple(extras)
- self.dist = dist
-
- def __str__(self):
- s = "%s = %s" % (self.name, self.module_name)
- if self.attrs:
- s += ':' + '.'.join(self.attrs)
- if self.extras:
- s += ' [%s]' % ','.join(self.extras)
- return s
-
- def __repr__(self):
- return "EntryPoint.parse(%r)" % str(self)
-
- def load(self, require=True, *args, **kwargs):
- """
- Require packages for this EntryPoint, then resolve it.
- """
- if not require or args or kwargs:
- warnings.warn(
- "Parameters to load are deprecated. Call .resolve and "
- ".require separately.",
- PkgResourcesDeprecationWarning,
- stacklevel=2,
- )
- if require:
- self.require(*args, **kwargs)
- return self.resolve()
-
- def resolve(self):
- """
- Resolve the entry point from its module and attrs.
- """
- module = __import__(self.module_name, fromlist=['__name__'], level=0)
- try:
- return functools.reduce(getattr, self.attrs, module)
- except AttributeError as exc:
- raise ImportError(str(exc)) from exc
-
- def require(self, env=None, installer=None):
- if self.extras and not self.dist:
- raise UnknownExtra("Can't require() without a distribution", self)
-
- # Get the requirements for this entry point with all its extras and
- # then resolve them. We have to pass `extras` along when resolving so
- # that the working set knows what extras we want. Otherwise, for
- # dist-info distributions, the working set will assume that the
- # requirements for that extra are purely optional and skip over them.
- reqs = self.dist.requires(self.extras)
- items = working_set.resolve(reqs, env, installer, extras=self.extras)
- list(map(working_set.add, items))
-
- pattern = re.compile(
- r'\s*'
-        r'(?P<name>.+?)\s*'
-        r'=\s*'
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+))?\s*'
-        r'(?P<extras>\[.*\])?\s*$'
- )
-
- @classmethod
- def parse(cls, src, dist=None):
- """Parse a single entry point from string `src`
-
- Entry point syntax follows the form::
-
- name = some.module:some.attr [extra1, extra2]
-
- The entry name and module name are required, but the ``:attrs`` and
- ``[extras]`` parts are optional
- """
- m = cls.pattern.match(src)
- if not m:
- msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
- raise ValueError(msg, src)
- res = m.groupdict()
- extras = cls._parse_extras(res['extras'])
- attrs = res['attr'].split('.') if res['attr'] else ()
- return cls(res['name'], res['module'], attrs, extras, dist)
-
- @classmethod
- def _parse_extras(cls, extras_spec):
- if not extras_spec:
- return ()
- req = Requirement.parse('x' + extras_spec)
- if req.specs:
- raise ValueError()
- return req.extras
-
- @classmethod
- def parse_group(cls, group, lines, dist=None):
- """Parse an entry point group"""
- if not MODULE(group):
- raise ValueError("Invalid group name", group)
- this = {}
- for line in yield_lines(lines):
- ep = cls.parse(line, dist)
- if ep.name in this:
- raise ValueError("Duplicate entry point", group, ep.name)
- this[ep.name] = ep
- return this
-
- @classmethod
- def parse_map(cls, data, dist=None):
- """Parse a map of entry point groups"""
- if isinstance(data, dict):
- data = data.items()
- else:
- data = split_sections(data)
- maps = {}
- for group, lines in data:
- if group is None:
- if not lines:
- continue
- raise ValueError("Entry points must be listed in groups")
- group = group.strip()
- if group in maps:
- raise ValueError("Duplicate group name", group)
- maps[group] = cls.parse_group(group, lines, dist)
- return maps
-
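-# Illustrative sketch (hypothetical module and attribute names):
-#
-#   ep = EntryPoint.parse('main = mypkg.cli:run [cli]')
-#   (ep.name, ep.module_name, ep.attrs, ep.extras)
-#   # -> ('main', 'mypkg.cli', ('run',), ('cli',))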
-
-def _version_from_file(lines):
- """
- Given an iterable of lines from a Metadata file, return
- the value of the Version field, if present, or None otherwise.
- """
-
- def is_version_line(line):
- return line.lower().startswith('version:')
-
- version_lines = filter(is_version_line, lines)
- line = next(iter(version_lines), '')
- _, _, value = line.partition(':')
- return safe_version(value.strip()) or None
-
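-# Illustrative sketch of the expected behavior (hypothetical metadata):
-#
-#   _version_from_file(['Metadata-Version: 2.1', 'Name: foo', 'Version: 1.0'])
-#   # -> '1.0'
-#   _version_from_file(['Name: foo'])  # -> None (no Version header)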
-
-class Distribution:
- """Wrap an actual or potential sys.path entry w/metadata"""
-
- PKG_INFO = 'PKG-INFO'
-
- def __init__(
- self,
- location=None,
- metadata=None,
- project_name=None,
- version=None,
- py_version=PY_MAJOR,
- platform=None,
- precedence=EGG_DIST,
- ):
- self.project_name = safe_name(project_name or 'Unknown')
- if version is not None:
- self._version = safe_version(version)
- self.py_version = py_version
- self.platform = platform
- self.location = location
- self.precedence = precedence
- self._provider = metadata or empty_provider
-
- @classmethod
- def from_location(cls, location, basename, metadata=None, **kw):
- project_name, version, py_version, platform = [None] * 4
- basename, ext = os.path.splitext(basename)
- if ext.lower() in _distributionImpl:
- cls = _distributionImpl[ext.lower()]
-
- match = EGG_NAME(basename)
- if match:
- project_name, version, py_version, platform = match.group(
- 'name', 'ver', 'pyver', 'plat'
- )
- return cls(
- location,
- metadata,
- project_name=project_name,
- version=version,
- py_version=py_version,
- platform=platform,
- **kw,
- )._reload_version()
-
- def _reload_version(self):
- return self
-
- @property
- def hashcmp(self):
- return (
- self._forgiving_parsed_version,
- self.precedence,
- self.key,
- self.location,
- self.py_version or '',
- self.platform or '',
- )
-
- def __hash__(self):
- return hash(self.hashcmp)
-
- def __lt__(self, other):
- return self.hashcmp < other.hashcmp
-
- def __le__(self, other):
- return self.hashcmp <= other.hashcmp
-
- def __gt__(self, other):
- return self.hashcmp > other.hashcmp
-
- def __ge__(self, other):
- return self.hashcmp >= other.hashcmp
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- # It's not a Distribution, so they are not equal
- return False
- return self.hashcmp == other.hashcmp
-
- def __ne__(self, other):
- return not self == other
-
- # These properties have to be lazy so that we don't have to load any
- # metadata until/unless it's actually needed. (i.e., some distributions
- # may not know their name or version without loading PKG-INFO)
-
- @property
- def key(self):
- try:
- return self._key
- except AttributeError:
- self._key = key = self.project_name.lower()
- return key
-
- @property
- def parsed_version(self):
- if not hasattr(self, "_parsed_version"):
- try:
- self._parsed_version = parse_version(self.version)
- except packaging.version.InvalidVersion as ex:
- info = f"(package: {self.project_name})"
- if hasattr(ex, "add_note"):
- ex.add_note(info) # PEP 678
- raise
- raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None
-
- return self._parsed_version
-
- @property
- def _forgiving_parsed_version(self):
- try:
- return self.parsed_version
- except packaging.version.InvalidVersion as ex:
- self._parsed_version = parse_version(_forgiving_version(self.version))
-
- notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678
- msg = f"""!!\n\n
- *************************************************************************
- {str(ex)}\n{notes}
-
- This is a long overdue deprecation.
- For the time being, `pkg_resources` will use `{self._parsed_version}`
- as a replacement to avoid breaking existing environments,
- but no future compatibility is guaranteed.
-
-            If you maintain package {self.project_name} you should implement
-            the relevant changes to make the project compliant with PEP 440 immediately.
- *************************************************************************
- \n\n!!
- """
- warnings.warn(msg, DeprecationWarning)
-
- return self._parsed_version
-
- @property
- def version(self):
- try:
- return self._version
- except AttributeError as e:
- version = self._get_version()
- if version is None:
- path = self._get_metadata_path_for_display(self.PKG_INFO)
- msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
- self.PKG_INFO, path
- )
- raise ValueError(msg, self) from e
-
- return version
-
- @property
- def _dep_map(self):
- """
- A map of extra to its list of (direct) requirements
- for this distribution, including the null extra.
- """
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._filter_extras(self._build_dep_map())
- return self.__dep_map
-
- @staticmethod
- def _filter_extras(dm):
- """
- Given a mapping of extras to dependencies, strip off
- environment markers and filter out any dependencies
- not matching the markers.
- """
- for extra in list(filter(None, dm)):
- new_extra = extra
- reqs = dm.pop(extra)
- new_extra, _, marker = extra.partition(':')
- fails_marker = marker and (
- invalid_marker(marker) or not evaluate_marker(marker)
- )
- if fails_marker:
- reqs = []
- new_extra = safe_extra(new_extra) or None
-
- dm.setdefault(new_extra, []).extend(reqs)
- return dm
-
- def _build_dep_map(self):
- dm = {}
- for name in 'requires.txt', 'depends.txt':
- for extra, reqs in split_sections(self._get_metadata(name)):
- dm.setdefault(extra, []).extend(parse_requirements(reqs))
- return dm
-
- def requires(self, extras=()):
- """List of Requirements needed for this distro if `extras` are used"""
- dm = self._dep_map
- deps = []
- deps.extend(dm.get(None, ()))
- for ext in extras:
- try:
- deps.extend(dm[safe_extra(ext)])
- except KeyError as e:
- raise UnknownExtra(
- "%s has no such extra feature %r" % (self, ext)
- ) from e
- return deps
-
- def _get_metadata_path_for_display(self, name):
- """
- Return the path to the given metadata file, if available.
- """
- try:
- # We need to access _get_metadata_path() on the provider object
- # directly rather than through this class's __getattr__()
- # since _get_metadata_path() is marked private.
- path = self._provider._get_metadata_path(name)
-
- # Handle exceptions e.g. in case the distribution's metadata
- # provider doesn't support _get_metadata_path().
- except Exception:
- return '[could not detect]'
-
- return path
-
- def _get_metadata(self, name):
- if self.has_metadata(name):
- for line in self.get_metadata_lines(name):
- yield line
-
- def _get_version(self):
- lines = self._get_metadata(self.PKG_INFO)
- version = _version_from_file(lines)
-
- return version
-
- def activate(self, path=None, replace=False):
- """Ensure distribution is importable on `path` (default=sys.path)"""
- if path is None:
- path = sys.path
- self.insert_on(path, replace=replace)
- if path is sys.path:
- fixup_namespace_packages(self.location)
- for pkg in self._get_metadata('namespace_packages.txt'):
- if pkg in sys.modules:
- declare_namespace(pkg)
-
- def egg_name(self):
- """Return what this distribution's standard .egg filename should be"""
- filename = "%s-%s-py%s" % (
- to_filename(self.project_name),
- to_filename(self.version),
- self.py_version or PY_MAJOR,
- )
-
- if self.platform:
- filename += '-' + self.platform
- return filename
-
- def __repr__(self):
- if self.location:
- return "%s (%s)" % (self, self.location)
- else:
- return str(self)
-
- def __str__(self):
- try:
- version = getattr(self, 'version', None)
- except ValueError:
- version = None
- version = version or "[unknown version]"
- return "%s %s" % (self.project_name, version)
-
- def __getattr__(self, attr):
- """Delegate all unrecognized public attributes to .metadata provider"""
- if attr.startswith('_'):
- raise AttributeError(attr)
- return getattr(self._provider, attr)
-
- def __dir__(self):
- return list(
- set(super(Distribution, self).__dir__())
- | set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
- )
-
- @classmethod
- def from_filename(cls, filename, metadata=None, **kw):
- return cls.from_location(
- _normalize_cached(filename), os.path.basename(filename), metadata, **kw
- )
-
- def as_requirement(self):
- """Return a ``Requirement`` that matches this distribution exactly"""
- if isinstance(self.parsed_version, packaging.version.Version):
- spec = "%s==%s" % (self.project_name, self.parsed_version)
- else:
- spec = "%s===%s" % (self.project_name, self.parsed_version)
-
- return Requirement.parse(spec)
-
- def load_entry_point(self, group, name):
- """Return the `name` entry point of `group` or raise ImportError"""
- ep = self.get_entry_info(group, name)
- if ep is None:
- raise ImportError("Entry point %r not found" % ((group, name),))
- return ep.load()
-
- def get_entry_map(self, group=None):
- """Return the entry point map for `group`, or the full entry map"""
- try:
- ep_map = self._ep_map
- except AttributeError:
- ep_map = self._ep_map = EntryPoint.parse_map(
- self._get_metadata('entry_points.txt'), self
- )
- if group is not None:
- return ep_map.get(group, {})
- return ep_map
-
- def get_entry_info(self, group, name):
- """Return the EntryPoint object for `group`+`name`, or ``None``"""
- return self.get_entry_map(group).get(name)
-
- # FIXME: 'Distribution.insert_on' is too complex (13)
- def insert_on(self, path, loc=None, replace=False): # noqa: C901
- """Ensure self.location is on path
-
- If replace=False (default):
- - If location is already in path anywhere, do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent.
- - Else: add to the end of path.
- If replace=True:
- - If location is already on path anywhere (not eggs)
- or higher priority than its parent (eggs)
- do nothing.
- - Else:
- - If it's an egg and its parent directory is on path,
- insert just ahead of the parent,
- removing any lower-priority entries.
- - Else: add it to the front of path.
- """
-
- loc = loc or self.location
- if not loc:
- return
-
- nloc = _normalize_cached(loc)
- bdir = os.path.dirname(nloc)
- npath = [(p and _normalize_cached(p) or p) for p in path]
-
- for p, item in enumerate(npath):
- if item == nloc:
- if replace:
- break
- else:
- # don't modify path (even removing duplicates) if
- # found and not replace
- return
- elif item == bdir and self.precedence == EGG_DIST:
- # if it's an .egg, give it precedence over its directory
- # UNLESS it's already been added to sys.path and replace=False
- if (not replace) and nloc in npath[p:]:
- return
- if path is sys.path:
- self.check_version_conflict()
- path.insert(p, loc)
- npath.insert(p, nloc)
- break
- else:
- if path is sys.path:
- self.check_version_conflict()
- if replace:
- path.insert(0, loc)
- else:
- path.append(loc)
- return
-
- # p is the spot where we found or inserted loc; now remove duplicates
- while True:
- try:
- np = npath.index(nloc, p + 1)
- except ValueError:
- break
- else:
- del npath[np], path[np]
- # ha!
- p = np
-
- return
-
- def check_version_conflict(self):
- if self.key == 'setuptools':
- # ignore the inevitable setuptools self-conflicts :(
- return
-
- nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
- loc = normalize_path(self.location)
- for modname in self._get_metadata('top_level.txt'):
- if (
- modname not in sys.modules
- or modname in nsp
- or modname in _namespace_packages
- ):
- continue
- if modname in ('pkg_resources', 'setuptools', 'site'):
- continue
- fn = getattr(sys.modules[modname], '__file__', None)
- if fn and (
- normalize_path(fn).startswith(loc) or fn.startswith(self.location)
- ):
- continue
- issue_warning(
- "Module %s was already imported from %s, but %s is being added"
- " to sys.path" % (modname, fn, self.location),
- )
-
- def has_version(self):
- try:
- self.version
- except ValueError:
- issue_warning("Unbuilt egg for " + repr(self))
- return False
- except SystemError:
- # TODO: remove this except clause when python/cpython#103632 is fixed.
- return False
- return True
-
- def clone(self, **kw):
- """Copy this distribution, substituting in any changed keyword args"""
- names = 'project_name version py_version platform location precedence'
- for attr in names.split():
- kw.setdefault(attr, getattr(self, attr, None))
- kw.setdefault('metadata', self._provider)
- return self.__class__(**kw)
-
- @property
- def extras(self):
- return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
- def _reload_version(self):
- """
-        Packages installed by distutils (e.g. numpy or scipy) use an old
-        safe_version, so their version numbers can get mangled when
-        converted to filenames (e.g., 1.11.0.dev0+2329eae to
-        1.11.0.dev0_2329eae). Such distributions will not be parsed
-        properly downstream by Distribution and safe_version, so take an
-        extra step and try to get the version number from the metadata
-        file itself instead of the filename.
- """
- md_version = self._get_version()
- if md_version:
- self._version = md_version
- return self
-
-
-class DistInfoDistribution(Distribution):
- """
- Wrap an actual or potential sys.path entry
- w/metadata, .dist-info style.
- """
-
- PKG_INFO = 'METADATA'
- EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
- @property
- def _parsed_pkg_info(self):
- """Parse and cache metadata"""
- try:
- return self._pkg_info
- except AttributeError:
- metadata = self.get_metadata(self.PKG_INFO)
- self._pkg_info = email.parser.Parser().parsestr(metadata)
- return self._pkg_info
-
- @property
- def _dep_map(self):
- try:
- return self.__dep_map
- except AttributeError:
- self.__dep_map = self._compute_dependencies()
- return self.__dep_map
-
- def _compute_dependencies(self):
- """Recompute this distribution's dependencies."""
- dm = self.__dep_map = {None: []}
-
- reqs = []
- # Including any condition expressions
- for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
- reqs.extend(parse_requirements(req))
-
- def reqs_for_extra(extra):
- for req in reqs:
- if not req.marker or req.marker.evaluate({'extra': extra}):
- yield req
-
- common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
- dm[None].extend(common)
-
- for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
- s_extra = safe_extra(extra.strip())
- dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common]
-
- return dm
-
-
-_distributionImpl = {
- '.egg': Distribution,
- '.egg-info': EggInfoDistribution,
- '.dist-info': DistInfoDistribution,
-}
-
-
-def issue_warning(*args, **kw):
- level = 1
- g = globals()
- try:
- # find the first stack frame that is *not* code in
- # the pkg_resources module, to use for the warning
- while sys._getframe(level).f_globals is g:
- level += 1
- except ValueError:
- pass
- warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-def parse_requirements(strs):
- """
- Yield ``Requirement`` objects for each specification in `strs`.
-
- `strs` must be a string, or a (possibly-nested) iterable thereof.
- """
- return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
-
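-# Illustrative sketch, using hypothetical project names:
-#
-#   reqs = list(parse_requirements("packaging>=20.0\nplatformdirs"))
-#   [str(r) for r in reqs]  # -> ['packaging>=20.0', 'platformdirs']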
-
-class RequirementParseError(packaging.requirements.InvalidRequirement):
- "Compatibility wrapper for InvalidRequirement"
-
-
-class Requirement(packaging.requirements.Requirement):
- def __init__(self, requirement_string):
- """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
- super(Requirement, self).__init__(requirement_string)
- self.unsafe_name = self.name
- project_name = safe_name(self.name)
- self.project_name, self.key = project_name, project_name.lower()
- self.specs = [(spec.operator, spec.version) for spec in self.specifier]
- self.extras = tuple(map(safe_extra, self.extras))
- self.hashCmp = (
- self.key,
- self.url,
- self.specifier,
- frozenset(self.extras),
- str(self.marker) if self.marker else None,
- )
- self.__hash = hash(self.hashCmp)
-
- def __eq__(self, other):
- return isinstance(other, Requirement) and self.hashCmp == other.hashCmp
-
- def __ne__(self, other):
- return not self == other
-
- def __contains__(self, item):
- if isinstance(item, Distribution):
- if item.key != self.key:
- return False
-
- item = item.version
-
- # Allow prereleases always in order to match the previous behavior of
- # this method. In the future this should be smarter and follow PEP 440
- # more accurately.
- return self.specifier.contains(item, prereleases=True)
-
- def __hash__(self):
- return self.__hash
-
- def __repr__(self):
- return "Requirement.parse(%r)" % str(self)
-
- @staticmethod
- def parse(s):
- (req,) = parse_requirements(s)
- return req
-
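-# Illustrative sketch: requirements support containment checks against
-# version strings (and Distribution objects):
-#
-#   req = Requirement.parse('requests>=2.0,<3')
-#   '2.31.0' in req  # True
-#   '3.1' in req     # False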
-
-def _always_object(classes):
- """
- Ensure object appears in the mro even
- for old-style classes.
- """
- if object not in classes:
- return classes + (object,)
- return classes
-
-
-def _find_adapter(registry, ob):
- """Return an adapter factory for `ob` from `registry`"""
- types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
- for t in types:
- if t in registry:
- return registry[t]
-
-
-def ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
- os.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
- """Sandbox-bypassing version of ensure_directory()"""
- if not WRITE_SUPPORT:
- raise IOError('"os.mkdir" not supported on this platform.')
- dirname, filename = split(path)
- if dirname and filename and not isdir(dirname):
- _bypass_ensure_directory(dirname)
- try:
- mkdir(dirname, 0o755)
- except FileExistsError:
- pass
-
-
-def split_sections(s):
- """Split a string or iterable thereof into (section, content) pairs
-
- Each ``section`` is a stripped version of the section header ("[section]")
- and each ``content`` is a list of stripped lines excluding blank lines and
- comment-only lines. If there are any such lines before the first section
- header, they're returned in a first ``section`` of ``None``.
- """
- section = None
- content = []
- for line in yield_lines(s):
- if line.startswith("["):
- if line.endswith("]"):
- if section or content:
- yield section, content
- section = line[1:-1].strip()
- content = []
- else:
- raise ValueError("Invalid section heading", line)
- else:
- content.append(line)
-
- # wrap up last segment
- yield section, content
-
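-# Illustrative sketch, mirroring the requires.txt layout:
-#
-#   list(split_sections("docutils\n[pdf]\nreportlab"))
-#   # -> [(None, ['docutils']), ('pdf', ['reportlab'])]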
-
-def _mkstemp(*args, **kw):
- old_open = os.open
- try:
- # temporarily bypass sandboxing
- os.open = os_open
- return tempfile.mkstemp(*args, **kw)
- finally:
- # and then put it back
- os.open = old_open
-
-
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
- f(*args, **kwargs)
- return f
-
-
-@_call_aside
-def _initialize(g=globals()):
- "Set up global resource manager (deliberately not state-saved)"
- manager = ResourceManager()
- g['_manager'] = manager
- g.update(
- (name, getattr(manager, name))
- for name in dir(manager)
- if not name.startswith('_')
- )
-
-
-class PkgResourcesDeprecationWarning(Warning):
- """
- Base class for warning about deprecations in ``pkg_resources``
-
- This class is not derived from ``DeprecationWarning``, and as such is
- visible by default.
- """
-
-
-@_call_aside
-def _initialize_master_working_set():
- """
- Prepare the master working set and make the ``require()``
- API available.
-
- This function has explicit effects on the global state
- of pkg_resources. It is intended to be invoked once at
- the initialization of this module.
-
- Invocation by other packages is unsupported and done
- at their own risk.
- """
- working_set = WorkingSet._build_master()
- _declare_state('object', working_set=working_set)
-
- require = working_set.require
- iter_entry_points = working_set.iter_entry_points
- add_activation_listener = working_set.subscribe
- run_script = working_set.run_script
- # backward compatibility
- run_main = run_script
- # Activate all distributions already on sys.path with replace=False and
- # ensure that all distributions added to the working set in the future
- # (e.g. by calling ``require()``) will get activated as well,
- # with higher priority (replace=True).
- tuple(dist.activate(replace=False) for dist in working_set)
- add_activation_listener(
- lambda dist: dist.activate(replace=True),
- existing=False,
- )
- working_set.entries = []
- # match order
- list(map(working_set.add_entry, sys.path))
- globals().update(locals())
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py
deleted file mode 100644
index 819bf5d3c2454c0a1853cfb695ed904686e1deb1..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import sys
-
-
-def disable_importlib_metadata_finder(metadata):
- """
- Ensure importlib_metadata doesn't provide older, incompatible
- Distributions.
-
- Workaround for #3102.
- """
- try:
- import importlib_metadata
- except ImportError:
- return
- except AttributeError:
- import warnings
-
- msg = (
- "`importlib-metadata` version is incompatible with `setuptools`.\n"
- "This problem is likely to be solved by installing an updated version of "
- "`importlib-metadata`."
- )
- warnings.warn(msg) # Ensure a descriptive message is shown.
- raise # This exception can be suppressed by _distutils_hack
-
- if importlib_metadata is metadata:
- return
- to_remove = [
- ob
- for ob in sys.meta_path
- if isinstance(ob, importlib_metadata.MetadataPathFinder)
- ]
- for item in to_remove:
- sys.meta_path.remove(item)
-
-
-if sys.version_info < (3, 10):
- from setuptools.extern import importlib_metadata as metadata
- disable_importlib_metadata_finder(metadata)
-else:
- import importlib.metadata as metadata # noqa: F401
-
-
-if sys.version_info < (3, 9):
- from setuptools.extern import importlib_resources as resources
-else:
- import importlib.resources as resources # noqa: F401
diff --git a/spaces/AzinZ/vitscn/commons.py b/spaces/AzinZ/vitscn/commons.py
deleted file mode 100644
index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000
--- a/spaces/AzinZ/vitscn/commons.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
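-# Illustrative sketch: "same" padding for a (possibly dilated) 1-D convolution:
-#   get_padding(5)              # -> 2
-#   get_padding(5, dilation=2)  # -> 4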
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def intersperse(lst, item):
- result = [item] * (len(lst) * 2 + 1)
- result[1::2] = lst
- return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = (
- math.log(float(max_timescale) / float(min_timescale)) /
- (num_timescales - 1))
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2,3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1. / norm_type)
- return total_norm
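# Hedged usage sketch for commons.py above (illustrative, not from the original source):
# how two of the helpers are typically combined in the VITS text-to-speech pipeline.
# Assumes the file is importable as `commons` (it sits at the repo root) and torch is installed.
import torch
from commons import intersperse, sequence_mask

phoneme_ids = [12, 5, 33, 7]
with_blanks = intersperse(phoneme_ids, 0)      # [0, 12, 0, 5, 0, 33, 0, 7, 0]
lengths = torch.tensor([6, 4])
mask = sequence_mask(lengths, max_length=6)    # bool tensor of shape [2, 6], True where index < length
print(with_blanks, mask)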
diff --git a/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md b/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md
deleted file mode 100644
index 925e9c6c18cd10f7e3dc5fed88d87a888d16314f..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md
+++ /dev/null
@@ -1,124 +0,0 @@
-
-
-# Plague Inc 1.18 5 Mod Apk Việt Hóa: How to Download and Play the Game
-
-Plague Inc is a popular simulation game that lets you create and evolve a pathogen to wipe out humanity with a deadly pandemic. But what if you want to play the game with more features, more languages and more fun? In this article, we show you how to download and play Plague Inc 1.18 5 Mod Apk Việt Hóa, a modified version of the game that offers many advantages over the original.
-
-## What is Plague Inc?
-
-### A brief introduction to the game and its features
-
-Plague Inc is a real-time strategy simulation game developed by Ndemic Creations. The game was inspired by the 2011 film Contagion and the 2008 Flash game Pandemic 2, and it had been downloaded more than 160 million times as of May 2021.
-The game lets you choose between different game modes and pathogens, such as bacteria, virus, fungus, parasite, prion, nano-virus, bio-weapon, Neurax Worm, Necroa Virus, Simian Flu and Shadow Plague. Each pathogen has its own traits and strategies to master.
-
-Your goal is to infect and kill the world's population with your plague while adapting to different environments and overcoming human defenses. You evolve your plague by spending DNA points on transmission, symptoms and abilities. Random events and world events can also affect the spread and severity of your plague.
-
-### The difference between the original version and the modded version
-
-Plague Inc 1.18 5 Mod Apk Việt Hóa is a modified version of Plague Inc that offers some advantages over the original version, including:
-
-* It unlocks all premium content for free, such as genes, scenarios, cheats and special plagues.
-* It gives you unlimited DNA points so you can evolve your plague faster.
-* It supports the Vietnamese language (việt hóa) as well as English and other languages.
-* It has improved graphics and sound effects for a better gaming experience.
-
-## How to download and install Plague Inc 1.18 5 Mod Apk Việt Hóa
-
-### The requirements and steps to download and install the mod apk
-
-To download and install Plague Inc 1.18 5 Mod Apk Việt Hóa, you need an Android device that meets the following requirements:
-
-* Android version 4.1 or higher.
-* At least 100 MB of free storage space.
-* A stable Internet connection.
-
-Once you have checked your device's compatibility, follow these steps to download and install the mod apk:
-
-1. Go to the link provided below to download the mod apk file.
-2. Allow your device to install apps from unknown sources. You can do this by going to Settings > Security > Unknown Sources and enabling the option.
-3. Locate the downloaded mod apk file in your device's file manager and tap on it to start the installation process.
-4. Follow the on-screen instructions to complete the installation.
-5. Launch the game and enjoy playing Plague Inc 1.18 5 Mod Apk Việt Hóa.
-
-### The link to download the mod apk
-
-You can download Plague Inc 1.18 5 Mod Apk Việt Hóa from this link: [Plague Inc 1.18 5 Mod Apk Việt Hóa]
-
-## How to play Plague Inc 1.18 5 Mod Apk Việt Hóa
-
-### The game modes and pathogens available in the mod apk
-
-You can choose between the following game modes:
-
-* Main Game: the standard mode in which you create and evolve your own plague and try to infect and kill the world.
-* Speed Run: a timed mode in which you have to infect and kill the world as fast as possible.
-* Co-Op Mode: a multiplayer mode in which you team up with another player and work together to infect and kill the world.
-* Versus Mode: a multiplayer mode in which you compete against another player and try to infect and kill more people than they do.
-
-You can also choose between the following pathogens:
-
-| Pathogen | Description |
-| --- | --- |
-| Bacteria | The most common and well-rounded pathogen. It has no special abilities but can evolve quickly. |
-| Virus | A rapidly mutating pathogen that can become hard to cure. It has a high chance of developing random symptoms, but it can also become lethal too quickly. |
-| Fungus | A slow-spreading pathogen that relies on spores to infect new countries. It has a low chance of being detected, but it can struggle in hot climates. |
-| Parasite | A stealthy pathogen that can avoid being noticed by humans. It has low severity, but it also reduces the DNA points gained from red biohazards. |
-| Prion | A complex pathogen that can manipulate human behaviour. It has a slow infection rate, but it can trigger neural atrophy that makes it harder to cure. |
-| Nano-Virus | A synthetic pathogen that is detected from the start of the game. It has high infectivity, but it can trigger kill switches that make it easier to cure. |
-| Bio-Weapon | A lethal pathogen that can kill humans quickly. It has high severity but can be unstable and hard to control. |
-| Neurax Worm | |
-| Necroa Virus | A zombie-creating virus that can reanimate dead humans. It has a unique symptom tree and can trigger a global military response. |
-| Simian Flu | A genetically engineered virus that can infect both humans and apes. It has a unique ability tree and can trigger an ape uprising. |
-| Shadow Plague | A vampiric pathogen that can create vampires and infect humans. It has a unique blood-lust system and can trigger a Templar response. |
-
-### Gameplay tips and tricks for creating and spreading a deadly plague
-
-Plague Inc 1.18 5 Mod Apk Việt Hóa is a challenging game that requires you to think strategically and creatively to achieve your goal of wiping out humanity. Here are some general tips and tricks that can help you improve your game:
-
-* Choose your pathogen and game mode wisely. Different pathogens and game modes have different strengths and weaknesses, so pick the one that suits your play style and strategy.
-* Start your plague in a populous, poor country. This gives you more DNA points and more opportunities to spread your plague to other countries.
-* Balance your transmission, symptoms and abilities. Evolve your plague so that it becomes more infectious, more severe and more resistant to different factors, such as climate, the cure and the human response.
-* Watch the news and world events. They give you clues about what is happening in the world and how humans are reacting to your plague; use this information to adjust your strategy accordingly.
-
-## Conclusion
-
-### A summary of the main points and a recommendation for the game
-
-Plague Inc 1.18 5 Mod Apk Việt Hóa is a fun and engaging game that lets you unleash your inner evil genius and create a global pandemic. It offers many features, options and challenges that make it more enjoyable and realistic than the original version, and you can download and install the mod apk easily from the link provided above. If you are looking for a game that tests your creativity, intelligence and strategy skills, Plague Inc 1.18 5 Mod Apk Việt Hóa is the game for you.
-
-## FAQ
-
-### Five unique questions and answers about the game and the mod apk
-
-Q: Is Plague Inc 1.18 5 Mod Apk Việt Hóa safe to download and play?
-A: Yes, Plague Inc 1.18 5 Mod Apk Việt Hóa is safe to download and play. The mod apk file has been scanned for viruses and malware and has no harmful effects on your device or data.
-
-Q: What are the benefits of playing Plague Inc 1.18 5 Mod Apk Việt Hóa over the original version?
-A: Plague Inc 1.18 5 Mod Apk Việt Hóa offers many benefits over the original version, such as unlocking all premium content for free, giving you unlimited DNA points, supporting the Vietnamese language, and improving the graphics and sound effects.
-
-Q: How do I update Plague Inc 1.18 5 Mod Apk Việt Hóa to the latest version?
-A: To update Plague Inc 1.18 5 Mod Apk Việt Hóa to the latest version, uninstall the current version from your device, download the new version from the same link provided above, and install it following the same steps as before.
-
-Q: How can I contact the developer of Plague Inc 1.18 5 Mod Apk Việt Hóa if I have questions or feedback?
-
-Q: How can I support the developer of Plague Inc 1.18 5 Mod Apk Việt Hóa if I like their work?
-A: You can support the developer of Plague Inc 1.18 5 Mod Apk Việt Hóa by sharing their work with your friends and family, giving them positive feedback and ratings, or donating to them if they offer a donation option.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md b/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md
deleted file mode 100644
index 3605ebfe9ae55723254e378b45baed1ceeb86ad7..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-# Chicken Gun APK Latest Version: A Fun and Crazy Online Shooting Game
-If you are looking for a fun and crazy online shooting game, you should try the latest version of Chicken Gun APK. This is a game in which you play as armed chickens that shoot and fight each other. You can choose between two modes: 5 vs 5 teams or free-for-all. You can also customise your rooster, weapon, beak, sneakers and caps. Throw explosive eggs and stage a massacre. Join the chicken shoot-out and have fun!
-## What is Chicken Gun APK?
-Chicken Gun APK is an Android game developed by ChaloApps. It combines action, humour and multiplayer features. Here are some of the things you can do in this game:
-### A game where you play as armed chickens
-In Chicken Gun APK you are not a human soldier but a chicken warrior. You can choose between different breeds of chicken, such as white, black, brown or red. Each chicken has its own stats and abilities. You can also equip your chicken with various weapons, such as pistols, shotguns, rifles or grenades.
-### A game with two modes: 5 vs 5 and free-for-all
-Chicken Gun APK offers two game modes: 5 vs 5 teams or free-for-all. In team mode, you join a team of five chickens and compete against another team of five chickens; the team with the most kills wins. In free-for-all mode, you play against nine other chickens in a chaotic battle royale; the last chicken standing wins.
-### A game where you can customise your rooster, weapon, beak, sneakers and caps
-Chicken Gun APK lets you customise your rooster in many ways. You can change its weapon, beak, sneakers and caps, and unlock new items by playing the game or buying them with coins. You can make your rooster look cool, funny or scary.
-## How to download and install Chicken Gun APK?
-
-You can download the APK file from a trusted source, such as [APKCombo], [APKLeon] or [APKBloch]. These are websites that offer free and safe downloads of Android games and apps. You can search for Chicken Gun APK on these sites and download the latest version.
-### Enable unknown sources on your device
-Before you can install the APK file on your device, you need to enable unknown sources. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and turn it on.
-### Install the APK file and enjoy the game
-After you have downloaded the APK file and enabled unknown sources, you can install the APK file on your device. To do so, locate the file in your downloads folder and tap on it, then follow the on-screen instructions to install the app. Once the installation is complete, open the app and start playing the game.
-## What are the features of Chicken Gun APK?
-Chicken Gun APK is a game that offers many features that make it fun and exciting. Here are some of them:
-### High-quality graphics and sound effects
-Chicken Gun APK has high-quality graphics and sound effects that create a realistic and immersive experience. The game has 3D models of chickens, weapons and environments, along with realistic physics and animations. The sound effects are loud and clear, and you can hear the gunshots, explosions and chicken noises.
-### Various weapons and items to use
-Chicken Gun APK has various weapons and items you can use to shoot and fight other chickens. You can choose between pistols, shotguns, rifles, grenades, rocket launchers, flamethrowers and more. You can also use explosive eggs, health kits, armour and other items to help you in battle.
-### Different maps and scenarios to explore
-
-Chicken Gun APK has an online multiplayer mode that lets you play with other players from around the world. You can join a public or private room, or create your own, and you can talk to other players using the built-in text and voice chat. You can make friends or enemies, cooperate or compete, and have a lot of fun.
-## What are the tips and tricks for playing Chicken Gun APK?
-Chicken Gun APK is a game that requires skill, strategy and luck. Here are some tips and tricks that can help you improve your game:
-### Aim for the head to deal more damage
-One of the most important skills in Chicken Gun APK is aiming. You need to aim for your enemies' heads to deal more damage and kill them faster. You can use the crosshair or the scope to aim better, and you can adjust the sensitivity of your controls to suit your preferences.
-### Use explosive eggs to cause chaos
-One of the most fun and effective items in Chicken Gun APK is the explosive egg. You can throw these eggs at your enemies or their surroundings to cause explosions and damage, and use them to distract, confuse or eliminate your enemies. You can also use these eggs to destroy walls, doors or vehicles.
-### Hide behind cover and keep moving to avoid being shot
-One of the most important strategies in Chicken Gun APK is hiding behind cover and moving to avoid being shot. Find a good spot where you can hide from your enemies' sight and shoot them safely, and move frequently so you are not an easy target. You can use the crouch, jump or sprint buttons to help you move faster or more stealthily.
-### Team up with your friends and communicate with them
-
-The latest version of Chicken Gun APK is a fun and crazy online shooting game that you should try if you like action, humour and multiplayer games. You can play as armed chickens that shoot and fight each other in different modes, maps and scenarios, and you can customise your rooster, weapon, beak, sneakers and caps. Download and install Chicken Gun APK now and join the chicken shoot-out!
-## FAQ
-Here are some frequently asked questions about Chicken Gun APK:
-Q: Is Chicken Gun APK free? - A: Yes, Chicken Gun APK is free to download and play. However, it contains ads and in-app purchases that you can disable or buy if you wish.
-Q: Is Chicken Gun APK safe? - A: Yes, Chicken Gun APK is safe to download and install if you get it from a trusted source, such as [APKCombo], [APKLeon] or [APKBloch], which offer free and safe downloads of Android games and apps. You can also scan the APK file with an antivirus app before installing it to make sure it is safe.
-Q: How do I update Chicken Gun APK? - A: You can update Chicken Gun APK by downloading and installing the latest version of the APK file from the same source you got it from. You can also check for updates inside the game by going to Settings > About > Check for updates.
-Q: How can I play Chicken Gun APK on PC? - A: You can play Chicken Gun APK on PC by using an Android emulator such as [BlueStacks], [NoxPlayer] or [LDPlayer]. These let you run Android apps and games on your PC: download and install an emulator, then download and install Chicken Gun APK inside the emulator, and play the game as you would on your device.
-Q: How can I contact the developer of Chicken Gun APK? - A: You can contact the developer of Chicken Gun APK by sending an email to chaloapps@gmail.com. You can also follow them on their [Facebook page] or their [YouTube channel] for more updates and news about the game.
-
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py
deleted file mode 100644
index adffd12db8c8e0477ee6532cd3b84f2e0cde9632..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import sys
-import marshal
-import contextlib
-import dis
-
-from setuptools.extern.packaging import version
-
-from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
-from . import _imp
-
-
-__all__ = [
- 'Require', 'find_module', 'get_module_constant', 'extract_constant'
-]
-
-
-class Require:
- """A prerequisite to building or installing a distribution"""
-
- def __init__(
- self, name, requested_version, module, homepage='',
- attribute=None, format=None):
-
- if format is None and requested_version is not None:
- format = version.Version
-
- if format is not None:
- requested_version = format(requested_version)
- if attribute is None:
- attribute = '__version__'
-
- self.__dict__.update(locals())
- del self.self
-
- def full_name(self):
- """Return full package/distribution name, w/version"""
- if self.requested_version is not None:
- return '%s-%s' % (self.name, self.requested_version)
- return self.name
-
- def version_ok(self, version):
- """Is 'version' sufficiently up-to-date?"""
- return self.attribute is None or self.format is None or \
- str(version) != "unknown" and self.format(version) >= self.requested_version
-
- def get_version(self, paths=None, default="unknown"):
- """Get version number of installed module, 'None', or 'default'
-
- Search 'paths' for module. If not found, return 'None'. If found,
- return the extracted version attribute, or 'default' if no version
- attribute was specified, or the value cannot be determined without
- importing the module. The version is formatted according to the
- requirement's version format (if any), unless it is 'None' or the
- supplied 'default'.
- """
-
- if self.attribute is None:
- try:
- f, p, i = find_module(self.module, paths)
- if f:
- f.close()
- return default
- except ImportError:
- return None
-
- v = get_module_constant(self.module, self.attribute, default, paths)
-
- if v is not None and v is not default and self.format is not None:
- return self.format(v)
-
- return v
-
- def is_present(self, paths=None):
- """Return true if dependency is present on 'paths'"""
- return self.get_version(paths) is not None
-
- def is_current(self, paths=None):
- """Return true if dependency is present and up-to-date on 'paths'"""
- version = self.get_version(paths)
- if version is None:
- return False
- return self.version_ok(str(version))
-
-
-def maybe_close(f):
- @contextlib.contextmanager
- def empty():
- yield
- return
- if not f:
- return empty()
-
- return contextlib.closing(f)
-
-
-def get_module_constant(module, symbol, default=-1, paths=None):
- """Find 'module' by searching 'paths', and extract 'symbol'
-
- Return 'None' if 'module' does not exist on 'paths', or it does not define
- 'symbol'. If the module defines 'symbol' as a constant, return the
- constant. Otherwise, return 'default'."""
-
- try:
- f, path, (suffix, mode, kind) = info = find_module(module, paths)
- except ImportError:
- # Module doesn't exist
- return None
-
- with maybe_close(f):
- if kind == PY_COMPILED:
- f.read(8) # skip magic & date
- code = marshal.load(f)
- elif kind == PY_FROZEN:
- code = _imp.get_frozen_object(module, paths)
- elif kind == PY_SOURCE:
- code = compile(f.read(), path, 'exec')
- else:
- # Not something we can parse; we'll have to import it. :(
- imported = _imp.get_module(module, paths, info)
- return getattr(imported, symbol, None)
-
- return extract_constant(code, symbol, default)
-
-
-def extract_constant(code, symbol, default=-1):
- """Extract the constant value of 'symbol' from 'code'
-
- If the name 'symbol' is bound to a constant value by the Python code
- object 'code', return that value. If 'symbol' is bound to an expression,
- return 'default'. Otherwise, return 'None'.
-
- Return value is based on the first assignment to 'symbol'. 'symbol' must
- be a global, or at least a non-"fast" local in the code block. That is,
- only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
- must be present in 'code.co_names'.
- """
- if symbol not in code.co_names:
- # name's not there, can't possibly be an assignment
- return None
-
- name_idx = list(code.co_names).index(symbol)
-
- STORE_NAME = 90
- STORE_GLOBAL = 97
- LOAD_CONST = 100
-
- const = default
-
- for byte_code in dis.Bytecode(code):
- op = byte_code.opcode
- arg = byte_code.arg
-
- if op == LOAD_CONST:
- const = code.co_consts[arg]
- elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
- return const
- else:
- const = default
-
-
-def _update_globals():
- """
- Patch the globals to remove the objects not available on some platforms.
-
- XXX it'd be better to test assertions about bytecode instead.
- """
-
- if not sys.platform.startswith('java') and sys.platform != 'cli':
- return
- incompatible = 'extract_constant', 'get_module_constant'
- for name in incompatible:
- del globals()[name]
- __all__.remove(name)
-
-
-_update_globals()
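# Hedged usage sketch for depends.py above (illustrative, not from the original source):
# extract_constant() reads a constant assignment straight out of a compiled code object
# without importing the module. Expected values are noted in comments for a CPython build
# whose opcode numbers match the constants hard-coded above.
from setuptools.depends import extract_constant

code = compile("__version__ = '1.2.3'", "<example>", "exec")
print(extract_constant(code, "__version__"))   # '1.2.3'  (name bound to a constant)
print(extract_constant(code, "missing"))       # None     (name not in code.co_names)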
diff --git a/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py b/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py
deleted file mode 100644
index 8e17db52c85aa693fe8a2f6d0036afc432580cfc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py
+++ /dev/null
@@ -1,46 +0,0 @@
-def accuracy(pred, target, topk=1, thresh=None):
- """Calculate accuracy according to the prediction and target.
-
- Args:
- pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
- target (torch.Tensor): The target of each prediction, shape (N, ...)
- topk (int | tuple[int], optional): If the predictions in ``topk``
- matches the target, the predictions will be regarded as
- correct ones. Defaults to 1.
- thresh (float, optional): If not None, predictions with scores under
- this threshold are considered incorrect. Default to None.
-
- Returns:
- float | tuple[float]: If the input ``topk`` is a single integer,
- the function will return a single float as accuracy. If
- ``topk`` is a tuple containing multiple integers, the
- function will return a tuple containing accuracies of
- each ``topk`` number.
- """
- assert isinstance(topk, (int, tuple))
- if isinstance(topk, int):
- topk = (topk, )
- return_single = True
- else:
- return_single = False
-
- maxk = max(topk)
- if pred.size(0) == 0:
- accu = [pred.new_tensor(0.) for i in range(len(topk))]
- return accu[0] if return_single else accu
- assert pred.ndim == target.ndim + 1
- assert pred.size(0) == target.size(0)
- assert maxk <= pred.size(1), \
- f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
- pred_value, pred_label = pred.topk(maxk, dim=1)
- # transpose to shape (maxk, N, ...)
- pred_label = pred_label.transpose(0, 1)
- correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
- if thresh is not None:
- # Only prediction values larger than thresh are counted as correct
- correct = correct & (pred_value > thresh).t()
- res = []
- for k in topk:
- correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
- res.append(correct_k.mul_(100.0 / target.numel()))
- return res[0] if return_single else res
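# Hedged usage sketch for accuracy() above (illustrative, not from the original source):
# top-1 and top-2 accuracy for a tiny batch of 4 samples and 3 classes. The import path
# simply mirrors where the file sits in this repo; adjust if needed.
import torch
from models.losses.accuracy import accuracy

pred = torch.tensor([[0.10, 0.70, 0.20],
                     [0.80, 0.15, 0.05],
                     [0.30, 0.25, 0.45],
                     [0.20, 0.50, 0.30]])
target = torch.tensor([1, 0, 1, 2])
top1, top2 = accuracy(pred, target, topk=(1, 2))
print(top1.item(), top2.item())   # 50.0 and 75.0 for this example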
diff --git a/spaces/CVPR/TokenCut/README.md b/spaces/CVPR/TokenCut/README.md
deleted file mode 100644
index 7714fab18c88bf10b0ee5cd04b4bca3ec5ea7e55..0000000000000000000000000000000000000000
--- a/spaces/CVPR/TokenCut/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: TokenCut
-emoji: 😎
-colorFrom: indigo
-colorTo: gray
-sdk: gradio
-sdk_version: 3.0.15
-app_file: app.py
-pinned: false
----
-
-This is the TokenCut demo; the original demo is from https://huggingface.co/spaces/akhaliq/TokenCut. Thanks to Ahsen Khaliq for his nice contribution.
diff --git a/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py b/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py
deleted file mode 100644
index 00a8e1a98d1147690ab6e21060a450f700cecb49..0000000000000000000000000000000000000000
--- a/spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Copyright 2022 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-import torch
-from typing import Optional, Union, Tuple, Dict
-
-
-def register_attention_control(model, controller):
- def ca_forward(self, place_in_unet):
-
- def forward(x, context=None, mask=None):
- batch_size, sequence_length, dim = x.shape
- h = self.heads
- q = self.to_q(x)
- is_cross = context is not None
- context = context if is_cross else x
- k = self.to_k(context)
- v = self.to_v(context)
- q = self.reshape_heads_to_batch_dim(q)
- k = self.reshape_heads_to_batch_dim(k)
- v = self.reshape_heads_to_batch_dim(v)
-
- sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale
-
- if mask is not None:
- mask = mask.reshape(batch_size, -1)
- max_neg_value = -torch.finfo(sim.dtype).max
- mask = mask[:, None, :].repeat(h, 1, 1)
- sim.masked_fill_(~mask, max_neg_value)
-
- # attention, what we cannot get enough of
- attn = sim.softmax(dim=-1)
- attn = controller(attn, is_cross, place_in_unet)
- out = torch.einsum("b i j, b j d -> b i d", attn, v)
- out = self.reshape_batch_dim_to_heads(out)
-
- # TODO: Chen (new version of diffusers)
- # return self.to_out(out)
- # linear proj
- out = self.to_out[0](out)
- # dropout
- out = self.to_out[1](out)
- return out
-
- return forward
-
- def register_recr(net_, count, place_in_unet):
- if net_.__class__.__name__ == 'CrossAttention':
- net_.forward = ca_forward(net_, place_in_unet)
- return count + 1
- elif hasattr(net_, 'children'):
- for net__ in net_.children():
- count = register_recr(net__, count, place_in_unet)
- return count
-
- cross_att_count = 0
- sub_nets = model.unet.named_children()
- for net in sub_nets:
- if "down" in net[0]:
- cross_att_count += register_recr(net[1], 0, "down")
- elif "up" in net[0]:
- cross_att_count += register_recr(net[1], 0, "up")
- elif "mid" in net[0]:
- cross_att_count += register_recr(net[1], 0, "mid")
- controller.num_att_layers = cross_att_count
-
-
-def get_word_inds(text: str, word_place: int, tokenizer):
- split_text = text.split(" ")
- if type(word_place) is str:
- word_place = [i for i, word in enumerate(split_text) if word_place == word]
- elif type(word_place) is int:
- word_place = [word_place]
- out = []
- if len(word_place) > 0:
- words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
- cur_len, ptr = 0, 0
-
- for i in range(len(words_encode)):
- cur_len += len(words_encode[i])
- if ptr in word_place:
- out.append(i + 1)
- if cur_len >= len(split_text[ptr]):
- ptr += 1
- cur_len = 0
- return np.array(out)
-
-
-def update_alpha_time_word(alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor]=None):
- if type(bounds) is float:
- bounds = 0, bounds
- start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
- if word_inds is None:
- word_inds = torch.arange(alpha.shape[2])
- alpha[: start, prompt_ind, word_inds] = 0
- alpha[start: end, prompt_ind, word_inds] = 1
- alpha[end:, prompt_ind, word_inds] = 0
- return alpha
-
-
-def get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
- tokenizer, max_num_words=77):
- if type(cross_replace_steps) is not dict:
- cross_replace_steps = {"default_": cross_replace_steps}
- if "default_" not in cross_replace_steps:
- cross_replace_steps["default_"] = (0., 1.)
- alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
- for i in range(len(prompts) - 1):
- alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"],
- i)
- for key, item in cross_replace_steps.items():
- if key != "default_":
- inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
- for i, ind in enumerate(inds):
- if len(ind) > 0:
- alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
- alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words) # time, batch, heads, pixels, words
- return alpha_time_words
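# Hedged usage sketch for ptp_utils above (illustrative, not from the original source):
# update_alpha_time_word() builds the per-timestep schedule that decides for how much of
# the diffusion process cross-attention maps are replaced. Shapes below are examples only.
import torch
from ptp_utils import update_alpha_time_word   # path as it appears in this repo

alpha = torch.zeros(11, 1, 77)                       # (num_steps + 1, num_prompts - 1, max words)
alpha = update_alpha_time_word(alpha, (0.0, 0.4), prompt_ind=0)
print(alpha[:, 0, 0])                                # 1.0 for the first 4 of 11 steps, 0.0 afterwards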
diff --git a/spaces/Cvandi/remake/app.py b/spaces/Cvandi/remake/app.py
deleted file mode 100644
index 97c59221c429e335c3a2e3413c11cc155d5b6122..0000000000000000000000000000000000000000
--- a/spaces/Cvandi/remake/app.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import os
-os.system("pip install gradio==2.9b23")
-import random
-import gradio as gr
-from PIL import Image
-import torch
-from random import randint
-import sys
-from subprocess import call
-import psutil
-
-
-
-
-torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg')
-
-
-def run_cmd(command):
- try:
- print(command)
- call(command, shell=True)
- except KeyboardInterrupt:
- print("Process interrupted")
- sys.exit(1)
-run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .")
-run_cmd("pip install basicsr")
-run_cmd("pip freeze")
-
-os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .")
-
-
-def inference(img,mode):
- _id = randint(1, 10000)
- INPUT_DIR = "/tmp/input_image" + str(_id) + "/"
- OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/"
- run_cmd("rm -rf " + INPUT_DIR)
- run_cmd("rm -rf " + OUTPUT_DIR)
- run_cmd("mkdir " + INPUT_DIR)
- run_cmd("mkdir " + OUTPUT_DIR)
- basewidth = 256
- wpercent = (basewidth/float(img.size[0]))
- hsize = int((float(img.size[1])*float(wpercent)))
- img = img.resize((basewidth,hsize), Image.ANTIALIAS)
- img.save(INPUT_DIR + "1.jpg", "JPEG")
- if mode == "base":
- run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
- else:
- os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
- return os.path.join(OUTPUT_DIR, "1_out.jpg")
-
-
-
-
-title = "Real-ESRGAN"
-description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once"
-article = """
- A novel method utilizing latent diffusion models trained for text-to-image generation to generate images conditioned on audio recordings. Using a pre-trained audio encoding model, the proposed method encodes audio into a new token, which can be considered as an adaptation layer between the audio and text representations.
- For more information, please see the original paper and repo.
-"""
-
- examples = [
- # ["assets/train.wav"],
- ["assets/dog barking.wav"],
- # ["assets/airplane taking off.wav"],
- # ["assets/electric guitar.wav"],
- # ["assets/female sings.wav"],
- ]
-
- demo = gr.Interface(
- fn=greet,
- inputs="audio",
- outputs="image",
- title='AudioToken',
- description=description,
- examples=examples
- )
- demo.launch()
diff --git a/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py b/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py
deleted file mode 100644
index 9b454c8cc236a114074c8a099878f8e464f87ad5..0000000000000000000000000000000000000000
--- a/spaces/HaloMaster/chinesesummary/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# coding=utf-8
-# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tokenization classes for TransfoXLDenoise."""
-
-import sentencepiece as spm
-from transformers.tokenization_utils import PreTrainedTokenizer
-
-VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
-
-PRETRAINED_VOCAB_FILES_MAP = {
- "vocab_file": {
- "transformer-xl-1b-base":
- "https://huggingface.co/IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B/resolve/main/spiece.model",
- },
-}
-
-PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
- "transformer-xl-1b-base": 512,
-}
-
-
-class TransfoXLDenoiseTokenizer(PreTrainedTokenizer):
- """
-    Construct a TransfoXLDenoise tokenizer, based on a pretrained SentencePiece model.
-
- Args:
- vocab_file (`str`):
- Path to the vocabulary file.
- """
-
- vocab_files_names = VOCAB_FILES_NAMES
- pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
- max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
- model_input_names = ["input_ids", "attention_mask"]
- SPIECE_UNDERLINE = "▁"
-
- def __init__(
- self,
- vocab_file,
- unk_token="<|endoftext|>",
- bos_token="<|endoftext|>",
- eos_token="<|endoftext|>",
- **kwargs
- ):
- super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
- "Initialisation"
- self.sp_model = spm.SentencePieceProcessor()
- self.sp_model.Load(vocab_file)
-
- @property
- def vocab_size(self):
- "Returns vocab size"
- return len(self.sp_model)
-
- def _tokenize(self, text):
- """ Returns a tokenized string. """
- return self.sp_model.EncodeAsPieces(text)
-
- def _convert_token_to_id(self, token):
- """ Converts a token (str) in an id using the vocab. """
- return self.sp_model.PieceToId(token)
-
- def _convert_id_to_token(self, index):
- """Converts an index (integer) in a token (str) using the vocab."""
- return self.sp_model.IdToPiece(index)
-
- def convert_tokens_to_string(self, tokens):
- """ Converts a sequence of tokens (string) in a single string. """
- out_string = "".join(tokens).replace(self.SPIECE_UNDERLINE, " ").strip()
- return out_string
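# Hedged usage sketch for the tokenizer above (illustrative, not from the original source):
# loading the SentencePiece model referenced in PRETRAINED_VOCAB_FILES_MAP and
# round-tripping a string. Assumes the spiece.model file is still reachable from that
# checkpoint; the repo id is taken from the map above and is not verified here.
tok = TransfoXLDenoiseTokenizer.from_pretrained("IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B")
ids = tok.encode("今天天气不错")
print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))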
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py
deleted file mode 100644
index 89f1aef4f6328d25425e0bcabb42dfffd2ed35f0..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/noisychannel/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .rerank_options import * # noqa
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py
deleted file mode 100644
index 36c85d1e2f60487494a92207feb4685e78db8aa2..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import sys
-
-
-def main():
- for line in sys.stdin:
- print(line.replace(" ", "").replace("|", " ").strip())
-
-
-if __name__ == "__main__":
- main()
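# Hedged sketch of the transform the script above applies to each stdin line
# (illustrative, not from the original source): spaces separate letters and "|"
# separates words in the letter-level transcriptions.
line = "h e l l o | w o r l d |"
print(line.replace(" ", "").replace("|", " ").strip())   # -> "hello world"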
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py b/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py
deleted file mode 100644
index 0a9edf6c3da3052758cb36bcfe1f50ba69cc6f32..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/utils/transforms.py
+++ /dev/null
@@ -1,508 +0,0 @@
-import random
-
-import torch
-import torchvision.transforms as T
-import torchvision.transforms.functional as F
-import numpy as np
-from PIL import Image
-
-
-def crop(image, target, region, delete=True):
- cropped_image = F.crop(image, *region)
-
- target = target.copy()
- i, j, h, w = region
-
- # should we do something wrt the original size?
- target["size"] = torch.tensor([h, w])
-
- fields = ["labels", "area"]
-
- if "boxes" in target:
- boxes = target["boxes"]
- max_size = torch.as_tensor([w, h], dtype=torch.float32)
- cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
- cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
- cropped_boxes = cropped_boxes.clamp(min=0)
- area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
- target["boxes"] = cropped_boxes.reshape(-1, 4)
- target["area"] = area
- fields.append("boxes")
-
- if "polygons" in target:
- polygons = target["polygons"]
- num_polygons = polygons.shape[0]
- max_size = torch.as_tensor([w, h], dtype=torch.float32)
- start_coord = torch.cat([torch.tensor([j, i], dtype=torch.float32)
- for _ in range(polygons.shape[1] // 2)], dim=0)
- cropped_boxes = polygons - start_coord
- cropped_boxes = torch.min(cropped_boxes.reshape(num_polygons, -1, 2), max_size)
- cropped_boxes = cropped_boxes.clamp(min=0)
- target["polygons"] = cropped_boxes.reshape(num_polygons, -1)
- fields.append("polygons")
-
- if "masks" in target:
- # FIXME should we update the area here if there are no boxes?
- target['masks'] = target['masks'][:, i:i + h, j:j + w]
- fields.append("masks")
-
- # remove elements for which the boxes or masks that have zero area
- if delete and ("boxes" in target or "masks" in target):
- # favor boxes selection when defining which elements to keep
- # this is compatible with previous implementation
- if "boxes" in target:
- cropped_boxes = target['boxes'].reshape(-1, 2, 2)
- keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
- else:
- keep = target['masks'].flatten(1).any(1)
-
- for field in fields:
- target[field] = target[field][keep.tolist()]
-
- return cropped_image, target
-
-
-def hflip(image, target):
- flipped_image = F.hflip(image)
-
- w, h = image.size
-
- target = target.copy()
- if "boxes" in target:
- boxes = target["boxes"]
- boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
- target["boxes"] = boxes
-
- if "polygons" in target:
- polygons = target["polygons"]
- num_polygons = polygons.shape[0]
- polygons = polygons.reshape(num_polygons, -1, 2) * torch.as_tensor([-1, 1]) + torch.as_tensor([w, 0])
- target["polygons"] = polygons
-
- if "masks" in target:
- target['masks'] = target['masks'].flip(-1)
-
- return flipped_image, target
-
-
-def resize(image, target, size, max_size=None):
- # size can be min_size (scalar) or (w, h) tuple
-
- def get_size_with_aspect_ratio(image_size, size, max_size=None):
- w, h = image_size
-
- if (w <= h and w == size) or (h <= w and h == size):
- if max_size is not None:
- max_size = int(max_size)
- h = min(h, max_size)
- w = min(w, max_size)
- return (h, w)
-
- if w < h:
- ow = size
- oh = int(size * h / w)
- else:
- oh = size
- ow = int(size * w / h)
-
- if max_size is not None:
- max_size = int(max_size)
- oh = min(oh, max_size)
- ow = min(ow, max_size)
-
- return (oh, ow)
-
- def get_size(image_size, size, max_size=None):
- if isinstance(size, (list, tuple)):
- return size[::-1]
- else:
- return get_size_with_aspect_ratio(image_size, size, max_size)
-
- size = get_size(image.size, size, max_size)
- rescaled_image = F.resize(image, size, interpolation=Image.BICUBIC)
-
- if target is None:
- return rescaled_image
-
- ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
- ratio_width, ratio_height = ratios
-
- target = target.copy()
- if "boxes" in target:
- boxes = target["boxes"]
- scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
- target["boxes"] = scaled_boxes
-
- if "polygons" in target:
- polygons = target["polygons"]
- scaled_ratio = torch.cat([torch.tensor([ratio_width, ratio_height])
- for _ in range(polygons.shape[1] // 2)], dim=0)
- scaled_polygons = polygons * scaled_ratio
- target["polygons"] = scaled_polygons
-
- if "area" in target:
- area = target["area"]
- scaled_area = area * (ratio_width * ratio_height)
- target["area"] = scaled_area
-
- h, w = size
- target["size"] = torch.tensor([h, w])
-
- if "masks" in target:
- assert False
- # target['masks'] = interpolate(
- # target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
-
- return rescaled_image, target
-
-
-class CenterCrop(object):
- def __init__(self, size):
- self.size = size
-
- def __call__(self, img, target):
- image_width, image_height = img.size
- crop_height, crop_width = self.size
- crop_top = int(round((image_height - crop_height) / 2.))
- crop_left = int(round((image_width - crop_width) / 2.))
- return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
-
-
-class ObjectCenterCrop(object):
- def __init__(self, size):
- self.size = size
-
- def __call__(self, img, target):
- image_width, image_height = img.size
- crop_height, crop_width = self.size
-
- x0 = float(target['boxes'][0][0])
- y0 = float(target['boxes'][0][1])
- x1 = float(target['boxes'][0][2])
- y1 = float(target['boxes'][0][3])
-
- center_x = (x0 + x1) / 2
- center_y = (y0 + y1) / 2
- crop_left = max(center_x-crop_width/2 + min(image_width-center_x-crop_width/2, 0), 0)
- crop_top = max(center_y-crop_height/2 + min(image_height-center_y-crop_height/2, 0), 0)
-
- return crop(img, target, (crop_top, crop_left, crop_height, crop_width), delete=False)
-
-
-class RandomHorizontalFlip(object):
- def __init__(self, p=0.5):
- self.p = p
-
- def __call__(self, img, target):
- if random.random() < self.p:
- return hflip(img, target)
- return img, target
-
-
-class RandomResize(object):
- def __init__(self, sizes, max_size=None, equal=False):
- assert isinstance(sizes, (list, tuple))
- self.sizes = sizes
- self.max_size = max_size
- self.equal = equal
-
- def __call__(self, img, target=None):
- size = random.choice(self.sizes)
- if self.equal:
- return resize(img, target, size, size)
- else:
- return resize(img, target, size, self.max_size)
-
-
-class ToTensor(object):
- def __call__(self, img, target):
- return F.to_tensor(img), target
-
-
-class Normalize(object):
- def __init__(self, mean, std, max_image_size=512):
- self.mean = mean
- self.std = std
- self.max_image_size = max_image_size
-
- def __call__(self, image, target=None):
- image = F.normalize(image, mean=self.mean, std=self.std)
- if target is None:
- return image, None
- target = target.copy()
- # h, w = image.shape[-2:]
- h, w = target["size"][0], target["size"][1]
- if "boxes" in target:
- boxes = target["boxes"]
- boxes = boxes / self.max_image_size
- target["boxes"] = boxes
- if "polygons" in target:
- polygons = target["polygons"]
- scale = torch.cat([torch.tensor([w, h], dtype=torch.float32)
- for _ in range(polygons.shape[1] // 2)], dim=0)
- polygons = polygons / scale
- target["polygons"] = polygons
- return image, target
-
-
-class Compose(object):
- def __init__(self, transforms):
- self.transforms = transforms
-
- def __call__(self, image, target):
- for t in self.transforms:
- image, target = t(image, target)
- return image, target
-
- def __repr__(self):
- format_string = self.__class__.__name__ + "("
- for t in self.transforms:
- format_string += "\n"
- format_string += " {0}".format(t)
- format_string += "\n)"
- return format_string
-
-
-class LargeScaleJitter(object):
- """
- implementation of large scale jitter from copy_paste
- """
-
- def __init__(self, output_size=512, aug_scale_min=0.3, aug_scale_max=2.0):
- self.desired_size = torch.tensor([output_size])
- self.aug_scale_min = aug_scale_min
- self.aug_scale_max = aug_scale_max
-
- def rescale_target(self, scaled_size, image_size, target):
- # compute rescaled targets
- image_scale = scaled_size / image_size
- ratio_height, ratio_width = image_scale
-
- target = target.copy()
- target["size"] = scaled_size
-
- if "boxes" in target:
- boxes = target["boxes"]
- scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
- target["boxes"] = scaled_boxes
-
- if "area" in target:
- area = target["area"]
- scaled_area = area * (ratio_width * ratio_height)
- target["area"] = scaled_area
-
- if "masks" in target:
- assert False
- masks = target['masks']
- # masks = interpolate(
- # masks[:, None].float(), scaled_size, mode="nearest")[:, 0] > 0.5
- target['masks'] = masks
- return target
-
- def crop_target(self, region, target):
- i, j, h, w = region
- fields = ["labels", "area"]
-
- target = target.copy()
- target["size"] = torch.tensor([h, w])
-
- if "boxes" in target:
- boxes = target["boxes"]
- max_size = torch.as_tensor([w, h], dtype=torch.float32)
- cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
- cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
- cropped_boxes = cropped_boxes.clamp(min=0)
- area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
- target["boxes"] = cropped_boxes.reshape(-1, 4)
- target["area"] = area
- fields.append("boxes")
-
- if "masks" in target:
- # FIXME should we update the area here if there are no boxes?
- target['masks'] = target['masks'][:, i:i + h, j:j + w]
- fields.append("masks")
-
- # remove elements for which the boxes or masks that have zero area
- if "boxes" in target or "masks" in target:
- # favor boxes selection when defining which elements to keep
- # this is compatible with previous implementation
- if "boxes" in target:
- cropped_boxes = target['boxes'].reshape(-1, 2, 2)
- keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
- else:
- keep = target['masks'].flatten(1).any(1)
-
- for field in fields:
- target[field] = target[field][keep.tolist()]
- return target
-
- def pad_target(self, padding, target):
- target = target.copy()
- if "masks" in target:
- target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[1], 0, padding[0]))
- return target
-
- def __call__(self, image, target=None):
- image_size = image.size
- image_size = torch.tensor(image_size[::-1])
-
- random_scale = torch.rand(1) * (self.aug_scale_max - self.aug_scale_min) + self.aug_scale_min
- scaled_size = (random_scale * self.desired_size).round()
-
- scale = torch.maximum(scaled_size / image_size[0], scaled_size / image_size[1])
- scaled_size = (image_size * scale).round().int()
-
- scaled_image = F.resize(image, scaled_size.tolist(), interpolation=Image.BICUBIC)
-
- if target is not None:
- target = self.rescale_target(scaled_size, image_size, target)
-
- # randomly crop or pad images
- if random_scale >= 1:
- # Selects non-zero random offset (x, y) if scaled image is larger than desired_size.
- max_offset = scaled_size - self.desired_size
- offset = (max_offset * torch.rand(2)).floor().int()
- region = (offset[0].item(), offset[1].item(),
- self.desired_size[0].item(), self.desired_size[0].item())
- output_image = F.crop(scaled_image, *region)
- if target is not None:
- target = self.crop_target(region, target)
- else:
- assert False
- padding = self.desired_size - scaled_size
- output_image = F.pad(scaled_image, [0, 0, padding[1].item(), padding[0].item()])
- if target is not None:
- target = self.pad_target(padding, target)
-
- return output_image, target
-
-
-class OriginLargeScaleJitter(object):
- """
- implementation of large scale jitter from copy_paste
- """
-
- def __init__(self, output_size=512, aug_scale_min=0.3, aug_scale_max=2.0):
- self.desired_size = torch.tensor(output_size)
- self.aug_scale_min = aug_scale_min
- self.aug_scale_max = aug_scale_max
-
- def rescale_target(self, scaled_size, image_size, target):
- # compute rescaled targets
- image_scale = scaled_size / image_size
- ratio_height, ratio_width = image_scale
-
- target = target.copy()
- target["size"] = scaled_size
-
- if "boxes" in target:
- boxes = target["boxes"]
- scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
- target["boxes"] = scaled_boxes
-
- if "area" in target:
- area = target["area"]
- scaled_area = area * (ratio_width * ratio_height)
- target["area"] = scaled_area
-
- if "masks" in target:
- assert False
- masks = target['masks']
- # masks = interpolate(
- # masks[:, None].float(), scaled_size, mode="nearest")[:, 0] > 0.5
- target['masks'] = masks
- return target
-
- def crop_target(self, region, target):
- i, j, h, w = region
- fields = ["labels", "area"]
-
- target = target.copy()
- target["size"] = torch.tensor([h, w])
-
- if "boxes" in target:
- boxes = target["boxes"]
- max_size = torch.as_tensor([w, h], dtype=torch.float32)
- cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
- cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
- cropped_boxes = cropped_boxes.clamp(min=0)
- area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
- target["boxes"] = cropped_boxes.reshape(-1, 4)
- target["area"] = area
- fields.append("boxes")
-
- if "masks" in target:
- # FIXME should we update the area here if there are no boxes?
- target['masks'] = target['masks'][:, i:i + h, j:j + w]
- fields.append("masks")
-
- # remove elements for which the boxes or masks that have zero area
- if "boxes" in target or "masks" in target:
- # favor boxes selection when defining which elements to keep
- # this is compatible with previous implementation
- if "boxes" in target:
- cropped_boxes = target['boxes'].reshape(-1, 2, 2)
- keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
- else:
- keep = target['masks'].flatten(1).any(1)
-
- for field in fields:
- target[field] = target[field][keep.tolist()]
- return target
-
- def pad_target(self, padding, target):
- target = target.copy()
- if "masks" in target:
- target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[1], 0, padding[0]))
- return target
-
- def __call__(self, image, target=None):
- image_size = image.size
- image_size = torch.tensor(image_size[::-1])
-
- out_desired_size = (self.desired_size * image_size / max(image_size)).round().int()
-
- random_scale = torch.rand(1) * (self.aug_scale_max - self.aug_scale_min) + self.aug_scale_min
- scaled_size = (random_scale * self.desired_size).round()
-
- scale = torch.minimum(scaled_size / image_size[0], scaled_size / image_size[1])
- scaled_size = (image_size * scale).round().int()
-
- scaled_image = F.resize(image, scaled_size.tolist())
-
- if target is not None:
- target = self.rescale_target(scaled_size, image_size, target)
-
- # randomly crop or pad images
- if random_scale > 1:
- # Selects non-zero random offset (x, y) if scaled image is larger than desired_size.
- max_offset = scaled_size - out_desired_size
- offset = (max_offset * torch.rand(2)).floor().int()
- region = (offset[0].item(), offset[1].item(),
- out_desired_size[0].item(), out_desired_size[1].item())
- output_image = F.crop(scaled_image, *region)
- if target is not None:
- target = self.crop_target(region, target)
- else:
- padding = out_desired_size - scaled_size
- output_image = F.pad(scaled_image, [0, 0, padding[1].item(), padding[0].item()])
- if target is not None:
- target = self.pad_target(padding, target)
-
- return output_image, target
-
-
-class RandomDistortion(object):
- """
- Randomly distort the image's brightness, contrast, saturation and hue.
- """
-
- def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, prob=0.5):
- self.prob = prob
- self.tfm = T.ColorJitter(brightness, contrast, saturation, hue)
-
- def __call__(self, img, target=None):
- if np.random.random() < self.prob:
- return self.tfm(img), target
- else:
- return img, target
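The three transform classes above share the same `(image, target)` calling convention. A minimal usage sketch follows; the PIL image and the DETR-style target dict (absolute xyxy boxes, labels, area) are illustrative assumptions, not values taken from this repo:

```python
import torch
from PIL import Image

# Hypothetical inputs; the field names match what rescale_target/crop_target/pad_target handle.
jitter = OriginLargeScaleJitter(output_size=512, aug_scale_min=0.3, aug_scale_max=2.0)
distort = RandomDistortion(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, prob=0.5)

image = Image.new("RGB", (640, 480))
target = {
    "boxes": torch.tensor([[10.0, 20.0, 200.0, 180.0]]),          # absolute xyxy coordinates
    "labels": torch.tensor([1]),
    "area": torch.tensor([(200.0 - 10.0) * (180.0 - 20.0)]),
}

image, target = distort(image, target)   # color jitter with probability `prob`
image, target = jitter(image, target)    # random rescale, then crop or pad to the output size
print(image.size, target["size"])
```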
diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py
deleted file mode 100644
index 8c6ca9ec8a6cc6408a77492e795bffef7f86b611..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/src/hifi_gan/meldataset.py
+++ /dev/null
@@ -1,233 +0,0 @@
-import math
-import os
-import random
-import torch
-import torch.utils.data
-import numpy as np
-from librosa.util import normalize
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def load_wav(full_path):
- sampling_rate, data = read(full_path)
- return data, sampling_rate
-
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
-
-
-def dynamic_range_decompression(x, C=1):
- return np.exp(x) / C
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def mel_spectrogram(
- y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
-):
- if torch.min(y) < -1.0:
- print("min value is ", torch.min(y))
- if torch.max(y) > 1.0:
- print("max value is ", torch.max(y))
-
- global mel_basis, hann_window
- if str(fmax) + "_" + str(y.device) not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[str(fmax) + "_" + str(y.device)] = (
- torch.from_numpy(mel).float().to(y.device)
- )
- hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
-
- y = torch.nn.functional.pad(
- y.unsqueeze(1),
- (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
- mode="reflect",
- )
- y = y.squeeze(1)
-
- spec = torch.stft(
- y,
- n_fft,
- hop_length=hop_size,
- win_length=win_size,
- window=hann_window[str(y.device)],
- center=center,
- pad_mode="reflect",
- normalized=False,
- onesided=True,
- )
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
-
- spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
-
-
-def get_dataset_filelist(a):
- with open(a.input_training_file, "r", encoding="utf-8") as fi:
- training_files = [x for x in fi.read().split("\n") if len(x) > 0]
-
- with open(a.input_validation_file, "r", encoding="utf-8") as fi:
- validation_files = [x for x in fi.read().split("\n") if len(x) > 0]
- return training_files, validation_files
-
-
-class MelDataset(torch.utils.data.Dataset):
- def __init__(
- self,
- training_files,
- segment_size,
- n_fft,
- num_mels,
- hop_size,
- win_size,
- sampling_rate,
- fmin,
- fmax,
- split=True,
- shuffle=True,
- n_cache_reuse=1,
- device=None,
- fmax_loss=None,
- fine_tuning=False,
- base_mels_path=None,
- ):
- self.audio_files = training_files
- random.seed(1234)
- if shuffle:
- random.shuffle(self.audio_files)
- self.segment_size = segment_size
- self.sampling_rate = sampling_rate
- self.split = split
- self.n_fft = n_fft
- self.num_mels = num_mels
- self.hop_size = hop_size
- self.win_size = win_size
- self.fmin = fmin
- self.fmax = fmax
- self.fmax_loss = fmax_loss
- self.cached_wav = None
- self.n_cache_reuse = n_cache_reuse
- self._cache_ref_count = 0
- self.device = device
- self.fine_tuning = fine_tuning
- self.base_mels_path = base_mels_path
-
- def __getitem__(self, index):
- filename = self.audio_files[index]
- if self._cache_ref_count == 0:
- audio, sampling_rate = load_wav(filename)
- audio = audio / MAX_WAV_VALUE
- if not self.fine_tuning:
- audio = normalize(audio) * 0.95
- self.cached_wav = audio
- if sampling_rate != self.sampling_rate:
- raise ValueError(
- "{} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate
- )
- )
- self._cache_ref_count = self.n_cache_reuse
- else:
- audio = self.cached_wav
- self._cache_ref_count -= 1
-
- audio = torch.FloatTensor(audio)
- audio = audio.unsqueeze(0)
-
- if not self.fine_tuning:
- if self.split:
- if audio.size(1) >= self.segment_size:
- max_audio_start = audio.size(1) - self.segment_size
- audio_start = random.randint(0, max_audio_start)
- audio = audio[:, audio_start : audio_start + self.segment_size]
- else:
- audio = torch.nn.functional.pad(
- audio, (0, self.segment_size - audio.size(1)), "constant"
- )
-
- mel = mel_spectrogram(
- audio,
- self.n_fft,
- self.num_mels,
- self.sampling_rate,
- self.hop_size,
- self.win_size,
- self.fmin,
- self.fmax,
- center=False,
- )
- else:
- mel = np.load(
- os.path.join(
- self.base_mels_path,
- os.path.splitext(os.path.split(filename)[-1])[0] + ".npy",
- )
- )
- mel = torch.from_numpy(mel)
-
- if len(mel.shape) < 3:
- mel = mel.unsqueeze(0)
-
- if self.split:
- frames_per_seg = math.ceil(self.segment_size / self.hop_size)
-
- if audio.size(1) >= self.segment_size:
- mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
- mel = mel[:, :, mel_start : mel_start + frames_per_seg]
- audio = audio[
- :,
- mel_start
- * self.hop_size : (mel_start + frames_per_seg)
- * self.hop_size,
- ]
- else:
- mel = torch.nn.functional.pad(
- mel, (0, frames_per_seg - mel.size(2)), "constant"
- )
- audio = torch.nn.functional.pad(
- audio, (0, self.segment_size - audio.size(1)), "constant"
- )
-
- mel_loss = mel_spectrogram(
- audio,
- self.n_fft,
- self.num_mels,
- self.sampling_rate,
- self.hop_size,
- self.win_size,
- self.fmin,
- self.fmax_loss,
- center=False,
- )
-
- return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
-
- def __len__(self):
- return len(self.audio_files)
diff --git a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/model/__init__.py b/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/model/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/HighCWu/GPEN/retinaface/utils/timer.py b/spaces/HighCWu/GPEN/retinaface/utils/timer.py
deleted file mode 100644
index e4b3b8098a5ad41f8d18d42b6b2fedb694aa5508..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/GPEN/retinaface/utils/timer.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# --------------------------------------------------------
-# Fast R-CNN
-# Copyright (c) 2015 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Ross Girshick
-# --------------------------------------------------------
-
-import time
-
-
-class Timer(object):
- """A simple timer."""
- def __init__(self):
- self.total_time = 0.
- self.calls = 0
- self.start_time = 0.
- self.diff = 0.
- self.average_time = 0.
-
- def tic(self):
- # using time.time instead of time.clock because time.clock
- # does not normalize for multithreading
- self.start_time = time.time()
-
- def toc(self, average=True):
- self.diff = time.time() - self.start_time
- self.total_time += self.diff
- self.calls += 1
- self.average_time = self.total_time / self.calls
- if average:
- return self.average_time
- else:
- return self.diff
-
- def clear(self):
- self.total_time = 0.
- self.calls = 0
- self.start_time = 0.
- self.diff = 0.
- self.average_time = 0.
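A quick usage example of the timer above; the sleep stands in for a model forward pass:

```python
import time

timer = Timer()
for _ in range(5):
    timer.tic()
    time.sleep(0.01)   # stand-in for a detection forward pass
    timer.toc()        # returns and updates the running average by default
print(f"{timer.calls} calls, avg {timer.average_time:.4f}s, last {timer.diff:.4f}s")
```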
diff --git a/spaces/HighCWu/Style2Paints-4-Gradio/README.md b/spaces/HighCWu/Style2Paints-4-Gradio/README.md
deleted file mode 100644
index d1c62d5447fd1f4f5cd9432fd13899780728d0b7..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/Style2Paints-4-Gradio/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Style2Paints 4 Gradio
-emoji: 🐨
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-python_version: 3.8
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py b/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py
deleted file mode 100644
index a6521ca0578ed8eea87773d2c34fe0a4f9d4af78..0000000000000000000000000000000000000000
--- a/spaces/HuggingAlgorithms/Object-Detection-with-YOLO/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from transformers import AutoFeatureExtractor, YolosForObjectDetection
-import gradio as gr
-from PIL import Image
-import torch
-import matplotlib.pyplot as plt
-import io
-import numpy as np
-
-
-COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
- [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]
-
-
-def process_class_list(classes_string: str):
- return [x.strip() for x in classes_string.split(",")] if classes_string else []
-
-def model_inference(img, model_name: str, prob_threshold: float, classes_to_show: str = ""):
- feature_extractor = AutoFeatureExtractor.from_pretrained(f"hustvl/{model_name}")
- model = YolosForObjectDetection.from_pretrained(f"hustvl/{model_name}")
-
- img = Image.fromarray(img)
-
- pixel_values = feature_extractor(img, return_tensors="pt").pixel_values
-
- with torch.no_grad():
- outputs = model(pixel_values, output_attentions=True)
-
- probas = outputs.logits.softmax(-1)[0, :, :-1]
- keep = probas.max(-1).values > prob_threshold
-
- target_sizes = torch.tensor(img.size[::-1]).unsqueeze(0)
- postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
- bboxes_scaled = postprocessed_outputs[0]['boxes']
-
- classes_list = process_class_list(classes_to_show)
- return plot_results(
- img, probas[keep], bboxes_scaled[keep], model, classes_list
- )
-
-def plot_results(pil_img, prob, boxes, model, classes_list):
- plt.figure(figsize=(16,10))
- plt.imshow(pil_img)
- ax = plt.gca()
- colors = COLORS * 100
- for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
- cl = p.argmax()
- object_class = model.config.id2label[cl.item()]
-
- if len(classes_list) > 0 :
- if object_class not in classes_list:
- continue
-
- ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
- fill=False, color=c, linewidth=3))
- text = f'{object_class}: {p[cl]:0.2f}'
- ax.text(xmin, ymin, text, fontsize=15,
- bbox=dict(facecolor='yellow', alpha=0.5))
- plt.axis('off')
- return fig2img(plt.gcf())
-
-def fig2img(fig):
- buf = io.BytesIO()
- fig.savefig(buf)
- buf.seek(0)
- return Image.open(buf)
-
-description = """
-Do you want to see what objects are in your images? Try our object detection app, powered by YOLOS, a state-of-the-art algorithm that can find and name multiple objects in a single image.
-You can upload or drag and drop an image file to detect objects using YOLOS models.
-You can also choose from different YOLOS models, adjust the probability threshold, and select the classes to use for detection.
-Our app will show you the results in an interactive image with bounding boxes and labels for each detected object.
-You can also download the results as an image file. Our app is fast, accurate, and easy to use.
-Try it now and discover the power of object detection! 😊
-"""
-
-image_in = gr.components.Image()
-image_out = gr.components.Image()
-model_choice = gr.components.Dropdown(["yolos-tiny", "yolos-small", "yolos-base", "yolos-small-300", "yolos-small-dwr"], value="yolos-small", label="YOLOS Model")
-prob_threshold_slider = gr.components.Slider(minimum=0, maximum=1.0, step=0.01, value=0.9, label="Probability Threshold")
-classes_to_show = gr.components.Textbox(placeholder="e.g. person, car, laptop", label="Classes to use (Optional)")
-
-Iface = gr.Interface(
- fn=model_inference,
- inputs=[image_in,model_choice, prob_threshold_slider, classes_to_show],
- outputs=image_out,
- title="Object Detection With YOLO",
- description=description,
- theme='HaleyCH/HaleyCH_Theme',
-).launch()
\ No newline at end of file
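`model_inference` can also be called outside the Gradio UI. A minimal sketch, assuming a local image file (the filename is illustrative) and one of the model names offered in the dropdown above:

```python
import numpy as np
from PIL import Image

img = np.array(Image.open("street.jpg"))      # hypothetical input image as a numpy array
result = model_inference(
    img,
    model_name="yolos-small",                 # resolves to the hustvl/yolos-small checkpoint
    prob_threshold=0.9,
    classes_to_show="person, car",            # an empty string keeps every detected class
)
result.save("detections.png")                 # PIL image with boxes and labels drawn
```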
diff --git a/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py b/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py
deleted file mode 100644
index 650f6f13d5a4ea72430d7085707fc9aed8ba2013..0000000000000000000000000000000000000000
--- a/spaces/IAMTFRMZA/DreamlikeArt-Diffusion-1.0/app.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-import random
-import string
-import time
-from queue import Queue
-from threading import Thread
-import emoji
-
-
-text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
-def get_prompts(prompt_text):
- if prompt_text:
- return text_gen("dreamlikeart, " + prompt_text)
- else:
- return text_gen("")
-proc1=gr.Interface.load("models/dreamlike-art/dreamlike-diffusion-1.0")
-
-def restart_script_periodically():
- while True:
- random_time = random.randint(540, 600)
- time.sleep(random_time)
- os.execl(sys.executable, sys.executable, *sys.argv)
-
-
-restart_thread = Thread(target=restart_script_periodically, daemon=True)
-restart_thread.start()
-
-
-queue = Queue()
-queue_threshold = 100
-
-def add_random_noise(prompt, noise_level=0.00):
- if noise_level == 0:
- noise_level = 0.00
- percentage_noise = noise_level * 5
- num_noise_chars = int(len(prompt) * (percentage_noise/100))
- noise_indices = random.sample(range(len(prompt)), num_noise_chars)
- prompt_list = list(prompt)
- noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
- noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
- for index in noise_indices:
- prompt_list[index] = random.choice(noise_chars)
- return "".join(prompt_list)
-
-
-
-def send_it1(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output1 = proc1(prompt_with_noise)
- return output1
-
-def send_it2(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output2 = proc1(prompt_with_noise)
- return output2
-
-def send_it3(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output3 = proc1(prompt_with_noise)
- return output3
-
-def send_it4(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output4 = proc1(prompt_with_noise)
- return output4
-
-
-with gr.Blocks(css='style.css') as demo:
- gr.HTML(
- """
-
-
-
- Image Creation Demo
-
-
-
- Noise Level: Controls how much randomness is added to the input before it is sent to the model. A higher noise level produces more diverse outputs, while a lower noise level keeps the outputs closer to the original prompt.
-
-
- """
- )
- with gr.Column(elem_id="col-container"):
- with gr.Row(variant="compact"):
- input_text = gr.Textbox(
- label="Short Prompt",
- show_label=False,
- max_lines=2,
- placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!",
- ).style(
- container=False,
- )
- see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=False)
-
-
- with gr.Row(variant="compact"):
- prompt = gr.Textbox(
- label="Enter your prompt",
- show_label=False,
- max_lines=2,
- placeholder="Full Prompt",
- ).style(
- container=False,
- )
- run = gr.Button("Generate Images").style(full_width=False)
-
- with gr.Row():
- with gr.Row():
- noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level")
- with gr.Row():
- with gr.Row():
- output1=gr.Image(label="Dreamlike Diffusion 1.0",show_label=False)
- output2=gr.Image(label="Dreamlike Diffusion 1.0",show_label=False)
-
-
- see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
- run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
- run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
-
-
- with gr.Row():
- gr.HTML(
- """
- """
-)
-
- demo.queue(concurrency_count=100)
- demo.launch(enable_queue=True, inline=True)
\ No newline at end of file
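A small worked example of the noise mapping in `add_random_noise` above: the slider value is multiplied by 5 to get the percentage of prompt characters replaced with random letters, punctuation, digits or emoji:

```python
prompt = "dreamlikeart, a castle floating above the clouds"
noisy = add_random_noise(prompt, noise_level=2.0)   # 2.0 * 5 = 10% of the characters replaced

changed = sum(a != b for a, b in zip(prompt, noisy))
print(changed, "of", len(prompt), "characters changed")  # about 10%, rounded down
```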
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py b/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py
deleted file mode 100644
index b8db8ff16691158fae034a8ab3faad622b351caf..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/datasets/asr_prep_json.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import argparse
-import concurrent.futures
-import json
-import multiprocessing
-import os
-from collections import namedtuple
-from itertools import chain
-
-import sentencepiece as spm
-from fairseq.data import Dictionary
-
-
-MILLISECONDS_TO_SECONDS = 0.001
-
-
-def process_sample(aud_path, label, utt_id, sp, tgt_dict):
- import torchaudio
-
- input = {}
- output = {}
- si, ei = torchaudio.info(aud_path)
- input["length_ms"] = int(
- si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
- )
- input["path"] = aud_path
-
- token = " ".join(sp.EncodeAsPieces(label))
- ids = tgt_dict.encode_line(token, append_eos=False)
- output["text"] = label
- output["token"] = token
- output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
- return {utt_id: {"input": input, "output": output}}
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--audio-dirs",
- nargs="+",
- default=["-"],
- required=True,
- help="input directories with audio files",
- )
- parser.add_argument(
- "--labels",
- required=True,
- help="aggregated input labels with format per line",
- type=argparse.FileType("r", encoding="UTF-8"),
- )
- parser.add_argument(
- "--spm-model",
- required=True,
- help="sentencepiece model to use for encoding",
- type=argparse.FileType("r", encoding="UTF-8"),
- )
- parser.add_argument(
- "--dictionary",
- required=True,
- help="file to load fairseq dictionary from",
- type=argparse.FileType("r", encoding="UTF-8"),
- )
- parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
- parser.add_argument(
- "--output",
- required=True,
- type=argparse.FileType("w"),
- help="path to save json output",
- )
- args = parser.parse_args()
-
- sp = spm.SentencePieceProcessor()
- sp.Load(args.spm_model.name)
-
- tgt_dict = Dictionary.load(args.dictionary)
-
- labels = {}
- for line in args.labels:
- (utt_id, label) = line.split(" ", 1)
- labels[utt_id] = label
- if len(labels) == 0:
- raise Exception("No labels found in ", args.labels.name)
-
- Sample = namedtuple("Sample", "aud_path utt_id")
- samples = []
- for path, _, files in chain.from_iterable(
- os.walk(path) for path in args.audio_dirs
- ):
- for f in files:
- if f.endswith(args.audio_format):
- if len(os.path.splitext(f)) != 2:
- raise Exception("Expect file name. Got: ", f)
- utt_id = os.path.splitext(f)[0]
- if utt_id not in labels:
- continue
- samples.append(Sample(os.path.join(path, f), utt_id))
-
- utts = {}
- num_cpu = multiprocessing.cpu_count()
- with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
- future_to_sample = {
- executor.submit(
- process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict
- ): s
- for s in samples
- }
- for future in concurrent.futures.as_completed(future_to_sample):
- try:
- data = future.result()
- except Exception as exc:
- print("generated an exception: ", exc)
- else:
- utts.update(data)
- json.dump({"utts": utts}, args.output, indent=4)
-
-
-if __name__ == "__main__":
- main()
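The script writes a single JSON file keyed by utterance id. A sketch of the resulting layout, with illustrative values (only the field names come from `process_sample` above):

```python
# Illustrative structure of the --output file; the id, path and token values are made up.
example = {
    "utts": {
        "utt_0001": {
            "input": {
                "length_ms": 2130,
                "path": "/data/audio/utt_0001.wav",
            },
            "output": {
                "text": "hello world\n",
                "token": "▁hello ▁world",
                "tokenid": "914, 1523",
            },
        }
    }
}
```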
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md
deleted file mode 100644
index 4a3ae54b857c43621c9fb67ee4b214584beec835..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-Speech Synthesis (S^2)
-===
-
-Speech synthesis with fairseq.
-
-- Autoregressive and non-autoregressive models
-- Multi-speaker synthesis
-- Audio preprocessing
-- Automatic metrics
-- Similar data configuration as [S2T](../speech_to_text/README.md)
-
-
-## Examples
-- [Single-speaker synthesis on LJSpeech](docs/ljspeech_example.md)
-- [Multi-speaker synthesis on VCTK](docs/vctk_example.md)
-- [Multi-speaker synthesis on Common Voice](docs/common_voice_example.md)
diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py
deleted file mode 100644
index f83471409a434556cab70086ca9e2d72d4bdddd5..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import sys
-
-
-def main():
- for line in sys.stdin:
- print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
-
-
-if __name__ == "__main__":
- main()
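The script turns a word-level transcript read from stdin into a letter-level one, with `|` marking word boundaries. A quick check of the transformation:

```python
line = "hello world"
print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
# -> h e l l o | w o r l d |
```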
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py
deleted file mode 100644
index c505e3acaa84e5f3263ccbfaf9556f77123f09fc..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/berard.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#!/usr/bin/env python3
-
-from ast import literal_eval
-from typing import List, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import checkpoint_utils, utils
-from fairseq.data.data_utils import lengths_to_padding_mask
-from fairseq.models import (
- FairseqEncoder,
- FairseqEncoderDecoderModel,
- FairseqIncrementalDecoder,
- register_model,
- register_model_architecture,
-)
-
-
-@register_model("s2t_berard")
-class BerardModel(FairseqEncoderDecoderModel):
- """Implementation of a model similar to https://arxiv.org/abs/1802.04200
-
- Paper title: End-to-End Automatic Speech Translation of Audiobooks
- An implementation is available in tensorflow at
- https://github.com/eske/seq2seq
- Relevant files in this implementation are the config
- (https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml)
- and the model code
- (https://github.com/eske/seq2seq/blob/master/translate/models.py).
- The encoder and decoder try to be close to the original implementation.
- The attention is an MLP as in Bahdanau et al.
- (https://arxiv.org/abs/1409.0473).
- There is no state initialization by averaging the encoder outputs.
- """
-
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
-
- @staticmethod
- def add_args(parser):
- parser.add_argument(
- "--input-layers",
- type=str,
- metavar="EXPR",
- help="List of linear layer dimensions. These "
- "layers are applied to the input features and "
- "are followed by tanh and possibly dropout.",
- )
- parser.add_argument(
- "--dropout",
- type=float,
- metavar="D",
- help="Dropout probability to use in the encoder/decoder. "
- "Note that this parameters control dropout in various places, "
- "there is no fine-grained control for dropout for embeddings "
- "vs LSTM layers for example.",
- )
- parser.add_argument(
- "--in-channels",
- type=int,
- metavar="N",
- help="Number of encoder input channels. " "Typically value is 1.",
- )
- parser.add_argument(
- "--conv-layers",
- type=str,
- metavar="EXPR",
- help="List of conv layers " "(format: (channels, kernel, stride)).",
- )
- parser.add_argument(
- "--num-blstm-layers",
- type=int,
- metavar="N",
- help="Number of encoder bi-LSTM layers.",
- )
- parser.add_argument(
- "--lstm-size", type=int, metavar="N", help="LSTM hidden size."
- )
- parser.add_argument(
- "--decoder-embed-dim",
- type=int,
- metavar="N",
- help="Embedding dimension of the decoder target tokens.",
- )
- parser.add_argument(
- "--decoder-hidden-dim",
- type=int,
- metavar="N",
- help="Decoder LSTM hidden dimension.",
- )
- parser.add_argument(
- "--decoder-num-layers",
- type=int,
- metavar="N",
- help="Number of decoder LSTM layers.",
- )
- parser.add_argument(
- "--attention-dim",
- type=int,
- metavar="N",
- help="Hidden layer dimension in MLP attention.",
- )
- parser.add_argument(
- "--output-layer-dim",
- type=int,
- metavar="N",
- help="Hidden layer dim for linear layer prior to output projection.",
- )
- parser.add_argument(
- "--load-pretrained-encoder-from",
- type=str,
- metavar="STR",
- help="model to take encoder weights from (for initialization)",
- )
- parser.add_argument(
- "--load-pretrained-decoder-from",
- type=str,
- metavar="STR",
- help="model to take decoder weights from (for initialization)",
- )
-
- @classmethod
- def build_encoder(cls, args, task):
- encoder = BerardEncoder(
- input_layers=literal_eval(args.input_layers),
- conv_layers=literal_eval(args.conv_layers),
- in_channels=args.input_channels,
- input_feat_per_channel=args.input_feat_per_channel,
- num_blstm_layers=args.num_blstm_layers,
- lstm_size=args.lstm_size,
- dropout=args.dropout,
- )
- if getattr(args, "load_pretrained_encoder_from", None):
- encoder = checkpoint_utils.load_pretrained_component_from_model(
- component=encoder, checkpoint=args.load_pretrained_encoder_from
- )
- return encoder
-
- @classmethod
- def build_decoder(cls, args, task):
- decoder = LSTMDecoder(
- dictionary=task.target_dictionary,
- embed_dim=args.decoder_embed_dim,
- num_layers=args.decoder_num_layers,
- hidden_size=args.decoder_hidden_dim,
- dropout=args.dropout,
- encoder_output_dim=2 * args.lstm_size, # bidirectional
- attention_dim=args.attention_dim,
- output_layer_dim=args.output_layer_dim,
- )
- if getattr(args, "load_pretrained_decoder_from", None):
- decoder = checkpoint_utils.load_pretrained_component_from_model(
- component=decoder, checkpoint=args.load_pretrained_decoder_from
- )
- return decoder
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
- encoder = cls.build_encoder(args, task)
- decoder = cls.build_decoder(args, task)
-
- return cls(encoder, decoder)
-
- def get_normalized_probs(self, net_output, log_probs, sample=None):
- # net_output['encoder_out'] is a (B, T, D) tensor
- lprobs = super().get_normalized_probs(net_output, log_probs, sample)
- # lprobs is a (B, T, D) tensor
- lprobs.batch_first = True
- return lprobs
-
-
-class BerardEncoder(FairseqEncoder):
- def __init__(
- self,
- input_layers: List[int],
- conv_layers: List[Tuple[int]],
- in_channels: int,
- input_feat_per_channel: int,
- num_blstm_layers: int,
- lstm_size: int,
- dropout: float,
- ):
- """
- Args:
- input_layers: list of linear layer dimensions. These layers are
- applied to the input features and are followed by tanh and
- possibly dropout.
- conv_layers: list of conv2d layer configurations. A configuration is
- a tuple (out_channels, conv_kernel_size, stride).
- in_channels: number of input channels.
- input_feat_per_channel: number of input features per channel. These
- are speech features, typically 40 or 80.
- num_blstm_layers: number of bidirectional LSTM layers.
- lstm_size: size of the LSTM hidden (and cell) size.
- dropout: dropout probability. Dropout can be applied after the
- linear layers and LSTM layers but not to the convolutional
- layers.
- """
- super().__init__(None)
-
- self.input_layers = nn.ModuleList()
- in_features = input_feat_per_channel
- for out_features in input_layers:
- if dropout > 0:
- self.input_layers.append(
- nn.Sequential(
- nn.Linear(in_features, out_features), nn.Dropout(p=dropout)
- )
- )
- else:
- self.input_layers.append(nn.Linear(in_features, out_features))
- in_features = out_features
-
- self.in_channels = in_channels
- self.input_dim = input_feat_per_channel
- self.conv_kernel_sizes_and_strides = []
- self.conv_layers = nn.ModuleList()
- lstm_input_dim = input_layers[-1]
- for conv_layer in conv_layers:
- out_channels, conv_kernel_size, conv_stride = conv_layer
- self.conv_layers.append(
- nn.Conv2d(
- in_channels,
- out_channels,
- conv_kernel_size,
- stride=conv_stride,
- padding=conv_kernel_size // 2,
- )
- )
- self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride))
- in_channels = out_channels
- lstm_input_dim //= conv_stride
-
- lstm_input_dim *= conv_layers[-1][0]
- self.lstm_size = lstm_size
- self.num_blstm_layers = num_blstm_layers
- self.lstm = nn.LSTM(
- input_size=lstm_input_dim,
- hidden_size=lstm_size,
- num_layers=num_blstm_layers,
- dropout=dropout,
- bidirectional=True,
- )
- self.output_dim = 2 * lstm_size # bidirectional
- if dropout > 0:
- self.dropout = nn.Dropout(p=dropout)
- else:
- self.dropout = None
-
- def forward(self, src_tokens, src_lengths=None, **kwargs):
- """
- Args
- src_tokens: padded tensor (B, T, C * feat)
- src_lengths: tensor of original lengths of input utterances (B,)
- """
- bsz, max_seq_len, _ = src_tokens.size()
- # (B, C, T, feat)
- x = (
- src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
- .transpose(1, 2)
- .contiguous()
- )
-
- for input_layer in self.input_layers:
- x = input_layer(x)
- x = torch.tanh(x)
-
- for conv_layer in self.conv_layers:
- x = conv_layer(x)
-
- bsz, _, output_seq_len, _ = x.size()
-
- # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) ->
- # (T, B, C * feat)
- x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
-
- input_lengths = src_lengths.clone()
- for k, s in self.conv_kernel_sizes_and_strides:
- p = k // 2
- input_lengths = (input_lengths.float() + 2 * p - k) / s + 1
- input_lengths = input_lengths.floor().long()
-
- packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths)
-
- h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
- c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
- packed_outs, _ = self.lstm(packed_x, (h0, c0))
-
- # unpack outputs and apply dropout
- x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs)
- if self.dropout is not None:
- x = self.dropout(x)
-
- encoder_padding_mask = (
- lengths_to_padding_mask(output_lengths).to(src_tokens.device).t()
- )
-
- return {
- "encoder_out": x, # (T, B, C)
- "encoder_padding_mask": encoder_padding_mask, # (T, B)
- }
-
- def reorder_encoder_out(self, encoder_out, new_order):
- encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
- 1, new_order
- )
- encoder_out["encoder_padding_mask"] = encoder_out[
- "encoder_padding_mask"
- ].index_select(1, new_order)
- return encoder_out
-
-
-class MLPAttention(nn.Module):
- """The original attention from Badhanau et al. (2014)
-
- https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron.
- The attention score between position i in the encoder and position j in the
- decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a)
- """
-
- def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim):
- super().__init__()
-
- self.context_dim = context_dim
- self.attention_dim = attention_dim
- # W_ae and b_a
- self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True)
- # W_ad
- self.decoder_proj = nn.Linear(
- decoder_hidden_state_dim, self.attention_dim, bias=False
- )
- # V_a
- self.to_scores = nn.Linear(self.attention_dim, 1, bias=False)
-
- def forward(self, decoder_state, source_hids, encoder_padding_mask):
- """The expected input dimensions are:
- decoder_state: bsz x decoder_hidden_state_dim
- source_hids: src_len x bsz x context_dim
- encoder_padding_mask: src_len x bsz
- """
- src_len, bsz, _ = source_hids.size()
- # (src_len*bsz) x context_dim (to feed through linear)
- flat_source_hids = source_hids.view(-1, self.context_dim)
- # (src_len*bsz) x attention_dim
- encoder_component = self.encoder_proj(flat_source_hids)
- # src_len x bsz x attention_dim
- encoder_component = encoder_component.view(src_len, bsz, self.attention_dim)
- # 1 x bsz x attention_dim
- decoder_component = self.decoder_proj(decoder_state).unsqueeze(0)
- # Sum with broadcasting and apply the non linearity
- # src_len x bsz x attention_dim
- hidden_att = torch.tanh(
- (decoder_component + encoder_component).view(-1, self.attention_dim)
- )
- # Project onto the reals to get attentions scores (src_len x bsz)
- attn_scores = self.to_scores(hidden_att).view(src_len, bsz)
-
- # Mask + softmax (src_len x bsz)
- if encoder_padding_mask is not None:
- attn_scores = (
- attn_scores.float()
- .masked_fill_(encoder_padding_mask, float("-inf"))
- .type_as(attn_scores)
- ) # FP16 support: cast to float and back
- # srclen x bsz
- normalized_masked_attn_scores = F.softmax(attn_scores, dim=0)
-
- # Sum weighted sources (bsz x context_dim)
- attn_weighted_context = (
- source_hids * normalized_masked_attn_scores.unsqueeze(2)
- ).sum(dim=0)
-
- return attn_weighted_context, normalized_masked_attn_scores
-
-
-class LSTMDecoder(FairseqIncrementalDecoder):
- def __init__(
- self,
- dictionary,
- embed_dim,
- num_layers,
- hidden_size,
- dropout,
- encoder_output_dim,
- attention_dim,
- output_layer_dim,
- ):
- """
- Args:
- dictionary: target text dictionary.
- embed_dim: embedding dimension for target tokens.
- num_layers: number of LSTM layers.
- hidden_size: hidden size for LSTM layers.
- dropout: dropout probability. Dropout can be applied to the
- embeddings, the LSTM layers, and the context vector.
- encoder_output_dim: encoder output dimension (hidden size of
- encoder LSTM).
- attention_dim: attention dimension for MLP attention.
- output_layer_dim: size of the linear layer prior to output
- projection.
- """
- super().__init__(dictionary)
- self.num_layers = num_layers
- self.hidden_size = hidden_size
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx)
- if dropout > 0:
- self.dropout = nn.Dropout(p=dropout)
- else:
- self.dropout = None
-
- self.layers = nn.ModuleList()
- for layer_id in range(num_layers):
- input_size = embed_dim if layer_id == 0 else encoder_output_dim
- self.layers.append(
- nn.LSTMCell(input_size=input_size, hidden_size=hidden_size)
- )
-
- self.context_dim = encoder_output_dim
- self.attention = MLPAttention(
- decoder_hidden_state_dim=hidden_size,
- context_dim=encoder_output_dim,
- attention_dim=attention_dim,
- )
-
- self.deep_output_layer = nn.Linear(
- hidden_size + encoder_output_dim + embed_dim, output_layer_dim
- )
- self.output_projection = nn.Linear(output_layer_dim, num_embeddings)
-
- def forward(
- self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
- ):
- encoder_padding_mask = encoder_out["encoder_padding_mask"]
- encoder_outs = encoder_out["encoder_out"]
-
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:]
- bsz, seqlen = prev_output_tokens.size()
-
- srclen = encoder_outs.size(0)
-
- # embed tokens
- embeddings = self.embed_tokens(prev_output_tokens)
- x = embeddings
- if self.dropout is not None:
- x = self.dropout(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # initialize previous states (or get from cache during incremental
- # generation)
- cached_state = utils.get_incremental_state(
- self, incremental_state, "cached_state"
- )
- if cached_state is not None:
- prev_hiddens, prev_cells = cached_state
- else:
- prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers
- prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers
-
- attn_scores = x.new_zeros(bsz, srclen)
- attention_outs = []
- outs = []
- for j in range(seqlen):
- input = x[j, :, :]
- attention_out = None
- for i, layer in enumerate(self.layers):
- # the previous state is one layer below except for the bottom
- # layer where the previous state is the state emitted by the
- # top layer
- hidden, cell = layer(
- input,
- (
- prev_hiddens[(i - 1) % self.num_layers],
- prev_cells[(i - 1) % self.num_layers],
- ),
- )
- if self.dropout is not None:
- hidden = self.dropout(hidden)
- prev_hiddens[i] = hidden
- prev_cells[i] = cell
- if attention_out is None:
- attention_out, attn_scores = self.attention(
- hidden, encoder_outs, encoder_padding_mask
- )
- if self.dropout is not None:
- attention_out = self.dropout(attention_out)
- attention_outs.append(attention_out)
- input = attention_out
-
- # collect the output of the top layer
- outs.append(hidden)
-
- # cache previous states (no-op except during incremental generation)
- utils.set_incremental_state(
- self, incremental_state, "cached_state", (prev_hiddens, prev_cells)
- )
-
- # collect outputs across time steps
- x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
- attention_outs_concat = torch.cat(attention_outs, dim=0).view(
- seqlen, bsz, self.context_dim
- )
-
- # T x B x C -> B x T x C
- x = x.transpose(0, 1)
- attention_outs_concat = attention_outs_concat.transpose(0, 1)
-
- # concat LSTM output, attention output and embedding
- # before output projection
- x = torch.cat((x, attention_outs_concat, embeddings), dim=2)
- x = self.deep_output_layer(x)
- x = torch.tanh(x)
- if self.dropout is not None:
- x = self.dropout(x)
- # project back to size of vocabulary
- x = self.output_projection(x)
-
- # to return the full attn_scores tensor, we need to fix the decoder
- # to account for subsampling input frames
- # return x, attn_scores
- return x, None
-
- def reorder_incremental_state(self, incremental_state, new_order):
- super().reorder_incremental_state(incremental_state, new_order)
- cached_state = utils.get_incremental_state(
- self, incremental_state, "cached_state"
- )
- if cached_state is None:
- return
-
- def reorder_state(state):
- if isinstance(state, list):
- return [reorder_state(state_i) for state_i in state]
- return state.index_select(0, new_order)
-
- new_state = tuple(map(reorder_state, cached_state))
- utils.set_incremental_state(self, incremental_state, "cached_state", new_state)
-
-
-@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard")
-def berard(args):
- """The original version: "End-to-End Automatic Speech Translation of
- Audiobooks" (https://arxiv.org/abs/1802.04200)
- """
- args.input_layers = getattr(args, "input_layers", "[256, 128]")
- args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]")
- args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
- args.lstm_size = getattr(args, "lstm_size", 256)
- args.dropout = getattr(args, "dropout", 0.2)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
- args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
- args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512)
- args.attention_dim = getattr(args, "attention_dim", 512)
- args.output_layer_dim = getattr(args, "output_layer_dim", 128)
- args.load_pretrained_encoder_from = getattr(
- args, "load_pretrained_encoder_from", None
- )
- args.load_pretrained_decoder_from = getattr(
- args, "load_pretrained_decoder_from", None
- )
-
-
-@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3")
-def berard_256_3_3(args):
- """Used in
- * "Harnessing Indirect Training Data for End-to-End Automatic Speech
- Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515)
- * "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus"
- (https://arxiv.org/pdf/2002.01320.pdf)
- * "Self-Supervised Representations Improve End-to-End Speech Translation"
- (https://arxiv.org/abs/2006.12124)
- """
- args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
- berard(args)
-
-
-@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2")
-def berard_512_3_2(args):
- args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
- args.lstm_size = getattr(args, "lstm_size", 512)
- args.dropout = getattr(args, "dropout", 0.3)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
- args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
- args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
- args.attention_dim = getattr(args, "attention_dim", 512)
- args.output_layer_dim = getattr(args, "output_layer_dim", 256)
- berard(args)
-
-
-@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3")
-def berard_512_5_3(args):
- args.num_blstm_layers = getattr(args, "num_blstm_layers", 5)
- args.lstm_size = getattr(args, "lstm_size", 512)
- args.dropout = getattr(args, "dropout", 0.3)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
- args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
- args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
- args.attention_dim = getattr(args, "attention_dim", 512)
- args.output_layer_dim = getattr(args, "output_layer_dim", 256)
- berard(args)
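A minimal sketch exercising `MLPAttention` above with dummy tensors, following the shapes documented in its forward method; the dimensions are arbitrary and not tied to any of the registered architectures:

```python
import torch

attn = MLPAttention(decoder_hidden_state_dim=512, context_dim=512, attention_dim=512)

src_len, bsz = 7, 2
decoder_state = torch.randn(bsz, 512)                        # bsz x decoder_hidden_state_dim
source_hids = torch.randn(src_len, bsz, 512)                 # src_len x bsz x context_dim
padding_mask = torch.zeros(src_len, bsz, dtype=torch.bool)   # True marks padded positions

context, scores = attn(decoder_state, source_hids, padding_mask)
print(context.shape)      # (bsz, context_dim): attention-weighted sum of encoder states
print(scores.sum(dim=0))  # each column sums to 1 (softmax over source positions)
```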
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py
deleted file mode 100644
index f869c4b2f8fb15f96a292e39bd293df7898a4fce..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Callable, Optional
-
-import torch
-import torch.nn as nn
-from fairseq import utils
-from fairseq.modules import LayerNorm, MultiheadAttention
-from fairseq.modules.fairseq_dropout import FairseqDropout
-from fairseq.modules.quant_noise import quant_noise
-
-
-class TransformerSentenceEncoderLayer(nn.Module):
- """
- Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
- models.
- """
-
- def __init__(
- self,
- embedding_dim: int = 768,
- ffn_embedding_dim: int = 3072,
- num_attention_heads: int = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- activation_fn: str = "relu",
- export: bool = False,
- q_noise: float = 0.0,
- qn_block_size: int = 8,
- init_fn: Callable = None,
- ) -> None:
- super().__init__()
-
- if init_fn is not None:
- init_fn()
-
- # Initialize parameters
- self.embedding_dim = embedding_dim
- self.num_attention_heads = num_attention_heads
- self.attention_dropout = attention_dropout
- self.q_noise = q_noise
- self.qn_block_size = qn_block_size
-
- self.dropout_module = FairseqDropout(
- dropout, module_name=self.__class__.__name__
- )
- self.activation_dropout_module = FairseqDropout(
- activation_dropout, module_name=self.__class__.__name__
- )
-
- # Initialize blocks
- self.activation_fn = utils.get_activation_fn(activation_fn)
- self.self_attn = self.build_self_attention(
- self.embedding_dim,
- num_attention_heads,
- dropout=attention_dropout,
- self_attention=True,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
-
- # layer norm associated with the self attention layer
- self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
-
- self.fc1 = self.build_fc1(
- self.embedding_dim,
- ffn_embedding_dim,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
- self.fc2 = self.build_fc2(
- ffn_embedding_dim,
- self.embedding_dim,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
-
- # layer norm associated with the position wise feed-forward NN
- self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
-
- def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
- return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
-
- def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
- return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
-
- def build_self_attention(
- self,
- embed_dim,
- num_attention_heads,
- dropout,
- self_attention,
- q_noise,
- qn_block_size,
- ):
- return MultiheadAttention(
- embed_dim,
- num_attention_heads,
- dropout=dropout,
- self_attention=True,
- q_noise=q_noise,
- qn_block_size=qn_block_size,
- )
-
- def forward(
- self,
- x: torch.Tensor,
- self_attn_mask: Optional[torch.Tensor] = None,
- self_attn_padding_mask: Optional[torch.Tensor] = None,
- ):
- """
- LayerNorm is applied after the self-attention and position-wise feed-forward
- modules (post-norm), matching the original Transformer implementation.
- """
- residual = x
- x, attn = self.self_attn(
- query=x,
- key=x,
- value=x,
- key_padding_mask=self_attn_padding_mask,
- need_weights=False,
- attn_mask=self_attn_mask,
- )
- x = self.dropout_module(x)
- x = residual + x
- x = self.self_attn_layer_norm(x)
-
- residual = x
- x = self.activation_fn(self.fc1(x))
- x = self.activation_dropout_module(x)
- x = self.fc2(x)
- x = self.dropout_module(x)
- x = residual + x
- x = self.final_layer_norm(x)
- return x, attn
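A minimal forward-pass sketch for the layer above (post-norm residual order), assuming fairseq is importable; shapes follow the `(T, B, C)` convention its self-attention expects:

```python
import torch

layer = TransformerSentenceEncoderLayer(
    embedding_dim=768, ffn_embedding_dim=3072, num_attention_heads=8
)

seq_len, bsz = 16, 2
x = torch.randn(seq_len, bsz, 768)                           # (T, B, C)
padding_mask = torch.zeros(bsz, seq_len, dtype=torch.bool)   # True marks padded tokens

out, attn = layer(x, self_attn_padding_mask=padding_mask)
print(out.shape)  # torch.Size([16, 2, 768]); attn is None because need_weights=False
```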
diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx b/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx
deleted file mode 100644
index a426553564b0652ba26ef39484ec67121809e939..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/demo/src/App.tsx
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) Meta Platforms, Inc. and affiliates.
-// All rights reserved.
-
-// This source code is licensed under the license found in the
-// LICENSE file in the root directory of this source tree.
-
-import { InferenceSession, Tensor } from "onnxruntime-web";
-import React, { useContext, useEffect, useState } from "react";
-import "./assets/scss/App.scss";
-import { handleImageScale } from "./components/helpers/scaleHelper";
-import { modelScaleProps } from "./components/helpers/Interfaces";
-import { onnxMaskToImage } from "./components/helpers/maskUtils";
-import { modelData } from "./components/helpers/onnxModelAPI";
-import Stage from "./components/Stage";
-import AppContext from "./components/hooks/createContext";
-const ort = require("onnxruntime-web");
-/* @ts-ignore */
-import npyjs from "npyjs";
-
-// Define image, embedding and model paths
-const IMAGE_PATH = "/assets/data/dogs.jpg";
-const IMAGE_EMBEDDING = "/assets/data/dogs_embedding.npy";
-const MODEL_DIR = "/model/sam_onnx_quantized_example.onnx";
-
-const App = () => {
- const {
- clicks: [clicks],
- image: [, setImage],
- maskImg: [, setMaskImg],
- } = useContext(AppContext)!;
- const [model, setModel] = useState(null); // ONNX model
- const [tensor, setTensor] = useState(null); // Image embedding tensor
-
- // The ONNX model expects the input to be rescaled to 1024.
- // The modelScale state variable keeps track of the scale values.
- const [modelScale, setModelScale] = useState(null);
-
- // Initialize the ONNX model, load the image, and load the SAM
- // pre-computed image embedding
- useEffect(() => {
- // Initialize the ONNX model
- const initModel = async () => {
- try {
- if (MODEL_DIR === undefined) return;
- const URL: string = MODEL_DIR;
- const model = await InferenceSession.create(URL);
- setModel(model);
- } catch (e) {
- console.log(e);
- }
- };
- initModel();
-
- // Load the image
- const url = new URL(IMAGE_PATH, location.origin);
- loadImage(url);
-
- // Load the Segment Anything pre-computed embedding
- Promise.resolve(loadNpyTensor(IMAGE_EMBEDDING, "float32")).then(
- (embedding) => setTensor(embedding)
- );
- }, []);
-
- const loadImage = async (url: URL) => {
- try {
- const img = new Image();
- img.src = url.href;
- img.onload = () => {
- const { height, width, samScale } = handleImageScale(img);
- setModelScale({
- height: height, // original image height
- width: width, // original image width
- samScale: samScale, // scaling factor for image which has been resized to longest side 1024
- });
- img.width = width;
- img.height = height;
- setImage(img);
- };
- } catch (error) {
- console.log(error);
- }
- };
-
- // Decode a Numpy file into a tensor.
- const loadNpyTensor = async (tensorFile: string, dType: string) => {
- let npLoader = new npyjs();
- const npArray = await npLoader.load(tensorFile);
- const tensor = new ort.Tensor(dType, npArray.data, npArray.shape);
- return tensor;
- };
-
- // Run the ONNX model every time clicks has changed
- useEffect(() => {
- runONNX();
- }, [clicks]);
-
- const runONNX = async () => {
- try {
- if (
- model === null ||
- clicks === null ||
- tensor === null ||
- modelScale === null
- )
- return;
- else {
- // Prepare the model input in the correct format for SAM.
- // The modelData function is from onnxModelAPI.tsx.
- const feeds = modelData({
- clicks,
- tensor,
- modelScale,
- });
- if (feeds === undefined) return;
- // Run the SAM ONNX model with the feeds returned from modelData()
- const results = await model.run(feeds);
- const output = results[model.outputNames[0]];
- // The predicted mask returned from the ONNX model is an array which is
- // rendered as an HTML image using onnxMaskToImage() from maskUtils.tsx.
- setMaskImg(onnxMaskToImage(output.data, output.dims[2], output.dims[3]));
- }
- } catch (e) {
- console.log(e);
- }
- };
-
- return <Stage />;
-};
-
-export default App;
diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/__init__.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py
deleted file mode 100644
index d4cbf2a34af5fa4f19566675a7b1bb1ce53eb3f4..0000000000000000000000000000000000000000
--- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/inference.py
+++ /dev/null
@@ -1,326 +0,0 @@
-"""Inference for FastChat models."""
-import abc
-from typing import Optional
-import warnings
-
-import torch
-
-try:
- from transformers import (
- AutoTokenizer,
- AutoModelForCausalLM,
- LlamaTokenizer,
- LlamaForCausalLM,
- AutoModel,
- AutoModelForSeq2SeqLM,
- AutoConfig,
- )
-except ImportError:
- from transformers import (
- AutoTokenizer,
- AutoModelForCausalLM,
- LLaMATokenizer,
- LLamaForCausalLM,
- AutoModel,
- AutoModelForSeq2SeqLM,
- AutoConfig,
- )
-
-from fastchat.conversation import (
- conv_templates,
- get_default_conv_template,
- compute_skip_echo_len,
- SeparatorStyle,
-)
-from fastchat.serve.compression import load_compress_model
-from fastchat.serve.monkey_patch_non_inplace import (
- replace_llama_attn_with_non_inplace_operations,
-)
-from fastchat.serve.serve_chatglm import chatglm_generate_stream
-
-
-def raise_warning_for_old_weights(model_path, model):
- if "vicuna" in model_path.lower():
- try:
- is_vicuna = isinstance(model, LlamaForCausalLM)
- except Exception:
- is_vicuna = isinstance(model, LLamaForCausalLM)
- if is_vicuna and model.model.vocab_size > 32000:
- warnings.warn(
- "\nYou are probably using the old Vicuna-v0 model, "
- "which will generate unexpected results with the "
- "current fschat.\nYou can try one of the following methods:\n"
- "1. Upgrade your weights to the new Vicuna-v1.1: https://github.com/lm-sys/FastChat#vicuna-weights.\n"
- "2. Use the old conversation template by `python3 -m fastchat.serve.cli --model-path /path/to/vicuna-v0 --conv-template conv_one_shot`\n"
- "3. Downgrade fschat to fschat==0.1.10 (Not recommonded).\n"
- )
-
-
-def get_gpu_memory(max_gpus=None):
- gpu_memory = []
- num_gpus = (
- torch.cuda.device_count()
- if max_gpus is None
- else min(max_gpus, torch.cuda.device_count())
- )
-
- for gpu_id in range(num_gpus):
- with torch.cuda.device(gpu_id):
- device = torch.cuda.current_device()
- gpu_properties = torch.cuda.get_device_properties(device)
- total_memory = gpu_properties.total_memory / (1024**3)
- allocated_memory = torch.cuda.memory_allocated() / (1024**3)
- available_memory = total_memory - allocated_memory
- gpu_memory.append(available_memory)
- return gpu_memory
-
-
-def load_model(
- model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, debug=False
-):
- if device == "cpu":
- kwargs = {"torch_dtype": torch.float32}
- elif device == "cuda":
- kwargs = {"torch_dtype": torch.float16}
- if num_gpus == "auto":
- kwargs["device_map"] = "auto"
- else:
- num_gpus = int(num_gpus)
- if num_gpus != 1:
- kwargs["device_map"] = "auto"
- if max_gpu_memory is None:
- kwargs[
- "device_map"
- ] = "sequential" # This is important for not the same VRAM sizes
- available_gpu_memory = get_gpu_memory(num_gpus)
- kwargs["max_memory"] = {
- i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
- for i in range(num_gpus)
- }
- else:
- kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
- print("init_kwargs", kwargs)
- elif device == "mps":
- kwargs = {"torch_dtype": torch.float16}
- # Avoid bugs in mps backend by not using in-place operations.
- replace_llama_attn_with_non_inplace_operations()
- else:
- raise ValueError(f"Invalid device: {device}")
-
- if load_8bit:
- if num_gpus != 1 and num_gpus != "1":
- warnings.warn("8-bit quantization is not supported for multi-gpu inference.")
- else:
- return load_compress_model(model_path=model_path, device=device, torch_dtype=kwargs["torch_dtype"])
-
- if "chatglm" in model_path:
- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
- model = AutoModel.from_pretrained(
- model_path, trust_remote_code=True, **kwargs
- ).cuda()
- elif "google/flan-t5" in model_path:
- tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
- model = AutoModelForSeq2SeqLM.from_pretrained(
- model_path, low_cpu_mem_usage=True, **kwargs
- )
- elif "dolly" in model_path:
- tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
- model = AutoModelForCausalLM.from_pretrained(
- model_path, low_cpu_mem_usage=True, **kwargs
- )
- # 50277 means "### End"
- tokenizer.eos_token_id = 50277
- elif "pythia" in model_path or "stablelm" in model_path:
- tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
- model = AutoModelForCausalLM.from_pretrained(
- model_path, low_cpu_mem_usage=True, **kwargs
- )
- else:
- tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
- model = AutoModelForCausalLM.from_pretrained(
- model_path, low_cpu_mem_usage=True, **kwargs
- )
- raise_warning_for_old_weights(model_path, model)
-
-
- if (device == "cuda" and num_gpus == 1) or device == "mps":
- model.to(device)
-
- if debug:
- print(model)
-
- return model, tokenizer
-
-
-@torch.inference_mode()
-def generate_stream(
- model, tokenizer, params, device, context_len=2048, stream_interval=2
-):
- prompt = params["prompt"]
- l_prompt = len(prompt)
- temperature = float(params.get("temperature", 1.0))
- max_new_tokens = int(params.get("max_new_tokens", 256))
- stop_str = params.get("stop", None)
- stop_token_ids = params.get("stop_ids", [tokenizer.eos_token_id])
-
- input_ids = tokenizer(prompt).input_ids
- output_ids = list(input_ids)
-
- max_src_len = context_len - max_new_tokens - 8
- input_ids = input_ids[-max_src_len:]
-
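- # Incremental decoding: the first step runs the full (truncated) prompt to build the
- # KV cache; every later step feeds only the newly sampled token plus past_key_values.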
- for i in range(max_new_tokens):
- if i == 0:
- if model.config.is_encoder_decoder:
- encoder_outputs = model.encoder(
- input_ids=torch.as_tensor([input_ids], device=device)
- )
- out = model(
- torch.as_tensor([input_ids], device=device),
- decoder_input_ids=torch.as_tensor(
- [[model.generation_config.decoder_start_token_id]],
- device=device,
- ),
- encoder_outputs=encoder_outputs,
- use_cache=True,
- )
- logits = out.logits
- past_key_values = out.past_key_values
- else:
- out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
- logits = out.logits
- past_key_values = out.past_key_values
- else:
- if model.config.is_encoder_decoder:
- out = model(
- input_ids=torch.as_tensor([input_ids], device=device),
- use_cache=True,
- encoder_outputs=encoder_outputs,
- decoder_input_ids=torch.as_tensor([[token]], device=device),
- past_key_values=past_key_values,
- )
- logits = out.logits
- past_key_values = out.past_key_values
- else:
- out = model(
- input_ids=torch.as_tensor([[token]], device=device),
- use_cache=True,
- past_key_values=past_key_values,
- )
- logits = out.logits
- past_key_values = out.past_key_values
-
- last_token_logits = logits[0][-1]
-
- if device == "mps":
- # Switch to CPU by avoiding some bugs in mps backend.
- last_token_logits = last_token_logits.float().to("cpu")
-
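- # Near-zero temperature means greedy argmax decoding; otherwise sample from the
- # temperature-scaled softmax distribution.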
- if temperature < 1e-4:
- token = int(torch.argmax(last_token_logits))
- else:
- probs = torch.softmax(last_token_logits / temperature, dim=-1)
- token = int(torch.multinomial(probs, num_samples=1))
-
- output_ids.append(token)
-
- if token in stop_token_ids:
- stopped = True
- else:
- stopped = False
-
- if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
- output = tokenizer.decode(output_ids, skip_special_tokens=True)
- if stop_str:
- pos = output.rfind(stop_str, l_prompt)
- if pos != -1:
- output = output[:pos]
- stopped = True
- yield output
-
- if stopped:
- break
-
- del past_key_values
-
-
-class ChatIO(abc.ABC):
- @abc.abstractmethod
- def prompt_for_input(self, role: str) -> str:
- """Prompt for input from a role."""
-
- @abc.abstractmethod
- def prompt_for_output(self, role: str):
- """Prompt for output from a role."""
-
- @abc.abstractmethod
- def stream_output(self, output_stream, skip_echo_len: int):
- """Stream output."""
-
-
-def chat_loop(
- model_path: str,
- device: str,
- num_gpus: str,
- max_gpu_memory: str,
- load_8bit: bool,
- conv_template: Optional[str],
- temperature: float,
- max_new_tokens: int,
- chatio: ChatIO,
- debug: bool,
-):
- # Model
- model, tokenizer = load_model(
- model_path, device, num_gpus, max_gpu_memory, load_8bit, debug
- )
- is_chatglm = "chatglm" in str(type(model)).lower()
-
- # Chat
- if conv_template:
- conv = conv_templates[conv_template].copy()
- else:
- conv = get_default_conv_template(model_path).copy()
-
- while True:
- try:
- inp = chatio.prompt_for_input(conv.roles[0])
- except EOFError:
- inp = ""
- if not inp:
- print("exit...")
- break
-
- conv.append_message(conv.roles[0], inp)
- conv.append_message(conv.roles[1], None)
-
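- # ChatGLM consumes the raw message history and uses its own streaming generator;
- # every other model gets a single flattened prompt string.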
- if is_chatglm:
- prompt = conv.messages[conv.offset :]
- generate_stream_func = chatglm_generate_stream
- else:
- generate_stream_func = generate_stream
- prompt = conv.get_prompt()
-
- skip_echo_len = compute_skip_echo_len(model_path, conv, prompt)
- stop_str = (
- conv.sep
- if conv.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE]
- else None
- )
-
- params = {
- "model": model_path,
- "prompt": prompt,
- "temperature": temperature,
- "max_new_tokens": max_new_tokens,
- "stop": stop_str,
- }
-
- chatio.prompt_for_output(conv.roles[1])
- output_stream = generate_stream_func(model, tokenizer, params, device)
- outputs = chatio.stream_output(output_stream, skip_echo_len)
- # NOTE: strip is important to align with the training data.
- conv.messages[-1][-1] = outputs.strip()
-
- if debug:
- print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py
deleted file mode 100644
index 397e85bea063e97fc4c12ad4d3e15669b69290bd..0000000000000000000000000000000000000000
--- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .upfirdn2d import upfirdn2d
-
-__all__ = ['upfirdn2d']
diff --git a/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py b/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py
deleted file mode 100644
index 60aac859d9ef0bec0ab89e1fc777f5ae5a6c5b98..0000000000000000000000000000000000000000
--- a/spaces/JavaFXpert/NimGPT-3.5/nim_gpt_functions.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from langchain import OpenAI
-from langchain.prompts import PromptTemplate, FewShotPromptTemplate
-from langchain.chains import LLMChain
-
-EXAMPLES_PROMPT_TEMPLATE = PromptTemplate(
- input_variables=["input", "output"],
- template="Input: {input}\nOutput: {output}"
-)
-
-PLAN_MOVE_PROMPT_EXAMPLES = [
- {"input": "The piles contain 3, 5, 7 sticks", "output": "I'll take one stick from pile A"},
- {"input": "The piles contain 2, 5, 7 sticks", "output": "I'll take one stick from pile B"},
- {"input": "The piles contain 2, 5, 7 sticks", "output": "I'll take five stick from pile B"},
- {"input": "The piles contain 1, 2, 3 sticks", "output": "I'll take two sticks from pile C"},
- {"input": "The piles contain 0, 2, 3 sticks", "output": "I'll take one stick from pile C"},
- {"input": "The piles contain 0, 2, 0 sticks", "output": "I'll take two sticks from pile B"},
-]
-
-PLAN_MOVE_PROMPT_FROM_STRING_EXAMPLES = FewShotPromptTemplate(
- examples=PLAN_MOVE_PROMPT_EXAMPLES,
- example_prompt=EXAMPLES_PROMPT_TEMPLATE,
- prefix="Nim is a two-player game of strategy in which players take turns removing objects from separate piles. "
- "The goal of the game is to remove the last sticks from a pile when the other piles contain 0 sticks. Each "
- "of these inputs represent a game state. For each of these game states please express a logical move that "
- "consists of taking some number of sticks from a pile. "
- "You may not take any sticks from a pile that contains 0 sticks. "
- "You may not take more sticks from a pile than it contains. "
- "You may only take sticks from one pile. ",
- suffix="Input: {text_game_state}\nOutput:",
- input_variables=["text_game_state"],
- example_separator="\n\n"
-)
-
-EXEC_MOVE_PROMPT_EXAMPLES = [
- {"input": "I'll take two sticks from pile A", "output": "0,2"},
- {"input": "I'll take 3 sticks from the first pile", "output": "0,3"},
- {"input": "I'll take two sticks from pile C", "output": "2,2"},
- {"input": "I'll take one stick from the third pile", "output": "2,1"},
- {"input": "From pile B remove 2 sticks", "output": "1,2"},
- {"input": "I'll take the last stick from pile C", "output": "2,1"},
-]
-
-EXEC_MOVE_PROMPT_FROM_STRING_EXAMPLES = FewShotPromptTemplate(
- examples=EXEC_MOVE_PROMPT_EXAMPLES,
- example_prompt=EXAMPLES_PROMPT_TEMPLATE,
- prefix="Express every input as two numbers separated by a comma, where the first number is the zero index pile "
- "number and the second number is the number of sticks to remove.",
- suffix="Input: {move_to_express}\nOutput:",
- input_variables=["move_to_express"],
- example_separator="\n\n"
-)
-
-
-def plan_move(text_game_state, temperature, api_key):
- llm = OpenAI(model_name='text-davinci-003', temperature=temperature, max_tokens=100,
- openai_api_key=api_key)
- llm_chain = LLMChain(llm=llm, prompt=PLAN_MOVE_PROMPT_FROM_STRING_EXAMPLES, verbose=False)
- planned_move = llm_chain.run({'text_game_state': text_game_state}).strip()
- return planned_move
-
-
-def execute_move(move_to_express, nim_game_env, api_key):
- llm = OpenAI(model_name='text-davinci-003', temperature=0.0, max_tokens=10,
- openai_api_key=api_key)
- llm_chain = LLMChain(llm=llm, prompt=EXEC_MOVE_PROMPT_FROM_STRING_EXAMPLES, verbose=False)
- step_tuple_str = llm_chain.run({'move_to_express': move_to_express})
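- # The chain answers in the "pile_index,count" format shown in the few-shot examples
- # (e.g. "1,2"); parse it into a tuple for the Nim environment's step() call.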
- step_tuple = tuple(int(x) for x in step_tuple_str.split(','))
- try:
- step_result = nim_game_env.step(step_tuple)
- except ValueError:
- return "Invalid move!", [0, 0, 0], 0, True, None
-
- text_observation = "The piles contain " + ", ".join(str(x) for x in step_result[0]) + " sticks."
- return text_observation, step_result[0], step_result[1], step_result[2], step_result[3]
diff --git a/spaces/JavierIA/gccopen/utils/plots.py b/spaces/JavierIA/gccopen/utils/plots.py
deleted file mode 100644
index fdd8d0e853deb228badeeed52fbbe5fb8eb10632..0000000000000000000000000000000000000000
--- a/spaces/JavierIA/gccopen/utils/plots.py
+++ /dev/null
@@ -1,489 +0,0 @@
-# Plotting utils
-
-import glob
-import math
-import os
-import random
-from copy import copy
-from pathlib import Path
-
-import cv2
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import seaborn as sns
-import torch
-import yaml
-from PIL import Image, ImageDraw, ImageFont
-from scipy.signal import butter, filtfilt
-
-from utils.general import xywh2xyxy, xyxy2xywh
-from utils.metrics import fitness
-
-# Settings
-matplotlib.rc('font', **{'size': 11})
-matplotlib.use('Agg') # for writing to files only
-
-
-def color_list():
- # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
- def hex2rgb(h):
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
-
- return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
-
-
-def hist2d(x, y, n=100):
- # 2d histogram used in labels.png and evolve.png
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
- return np.log(hist[xidx, yidx])
-
-
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
- def butter_lowpass(cutoff, fs, order):
- nyq = 0.5 * fs
- normal_cutoff = cutoff / nyq
- return butter(order, normal_cutoff, btype='low', analog=False)
-
- b, a = butter_lowpass(cutoff, fs, order=order)
- return filtfilt(b, a, data) # forward-backward filter
-
-
-def plot_one_box(x, img, color=None, label=None, line_thickness=3):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
- color = color or [random.randint(0, 255) for _ in range(3)]
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
- if label:
- tf = max(tl - 1, 1) # font thickness
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
-
-
-def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
- img = Image.fromarray(img)
- draw = ImageDraw.Draw(img)
- line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
- draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
- if label:
- fontsize = max(round(max(img.size) / 40), 12)
- font = ImageFont.truetype("Arial.ttf", fontsize)
- txt_width, txt_height = font.getsize(label)
- draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
- draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
- return np.asarray(img)
-
-
-def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
- # Compares the two methods for width-height anchor multiplication
- # https://github.com/ultralytics/yolov3/issues/168
- x = np.arange(-4.0, 4.0, .1)
- ya = np.exp(x)
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
-
- fig = plt.figure(figsize=(6, 3), tight_layout=True)
- plt.plot(x, ya, '.-', label='YOLOv3')
- plt.plot(x, yb ** 2, '.-', label='YOLOR ^2')
- plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6')
- plt.xlim(left=-4, right=4)
- plt.ylim(bottom=0, top=6)
- plt.xlabel('input')
- plt.ylabel('output')
- plt.grid()
- plt.legend()
- fig.savefig('comparison.png', dpi=200)
-
-
-def output_to_target(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
- targets = []
- for i, o in enumerate(output):
- for *box, conf, cls in o.cpu().numpy():
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
- return np.array(targets)
-
-
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
- # Plot image grid with labels
-
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
-
- # un-normalise
- if np.max(images[0]) <= 1:
- images *= 255
-
- tl = 3 # line thickness
- tf = max(tl - 1, 1) # font thickness
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
-
- # Check if we should resize
- scale_factor = max_size / max(h, w)
- if scale_factor < 1:
- h = math.ceil(scale_factor * h)
- w = math.ceil(scale_factor * w)
-
- colors = color_list() # list of colors
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
- for i, img in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
-
- block_x = int(w * (i // ns))
- block_y = int(h * (i % ns))
-
- img = img.transpose(1, 2, 0)
- if scale_factor < 1:
- img = cv2.resize(img, (w, h))
-
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
- if len(targets) > 0:
- image_targets = targets[targets[:, 0] == i]
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
- classes = image_targets[:, 1].astype('int')
- labels = image_targets.shape[1] == 6 # labels if no conf column
- conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
-
- if boxes.shape[1]:
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
- boxes[[0, 2]] *= w # scale to pixels
- boxes[[1, 3]] *= h
- elif scale_factor < 1: # absolute coords need scale if image scales
- boxes *= scale_factor
- boxes[[0, 2]] += block_x
- boxes[[1, 3]] += block_y
- for j, box in enumerate(boxes.T):
- cls = int(classes[j])
- color = colors[cls % len(colors)]
- cls = names[cls] if names else cls
- if labels or conf[j] > 0.25: # 0.25 conf thresh
- label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
-
- # Draw image filename labels
- if paths:
- label = Path(paths[i]).name[:40] # trim to 40 char
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
- lineType=cv2.LINE_AA)
-
- # Image border
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
-
- if fname:
- r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
- mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
- # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
- Image.fromarray(mosaic).save(fname) # PIL save
- return mosaic
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
- # Plot LR simulating training for full epochs
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
- y = []
- for _ in range(epochs):
- scheduler.step()
- y.append(optimizer.param_groups[0]['lr'])
- plt.plot(y, '.-', label='LR')
- plt.xlabel('epoch')
- plt.ylabel('LR')
- plt.grid()
- plt.xlim(0, epochs)
- plt.ylim(0)
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
- plt.close()
-
-
-def plot_test_txt(): # from utils.plots import *; plot_test()
- # Plot test.txt histograms
- x = np.loadtxt('test.txt', dtype=np.float32)
- box = xyxy2xywh(x[:, :4])
- cx, cy = box[:, 0], box[:, 1]
-
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
- ax.set_aspect('equal')
- plt.savefig('hist2d.png', dpi=300)
-
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
- ax[0].hist(cx, bins=600)
- ax[1].hist(cy, bins=600)
- plt.savefig('hist1d.png', dpi=200)
-
-
-def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
- # Plot targets.txt histograms
- x = np.loadtxt('targets.txt', dtype=np.float32).T
- s = ['x targets', 'y targets', 'width targets', 'height targets']
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
- ax[i].legend()
- ax[i].set_title(s[i])
- plt.savefig('targets.jpg', dpi=200)
-
-
-def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
- # Plot study.txt generated by test.py
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- # ax = ax.ravel()
-
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:
- for f in sorted(Path(path).glob('study*.txt')):
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
- x = np.arange(y.shape[1]) if x is None else np.array(x)
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- # for i in range(7):
- # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- # ax[i].set_title(s[i])
-
- j = y[3].argmax() + 1
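- # Speed/accuracy frontier: total per-image time vs mAP@.5:.95 (as %), up to each run's best mAP.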
- ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
-
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
-
- ax2.grid(alpha=0.2)
- ax2.set_yticks(np.arange(20, 60, 5))
- ax2.set_xlim(0, 57)
- ax2.set_ylim(30, 55)
- ax2.set_xlabel('GPU Speed (ms/img)')
- ax2.set_ylabel('COCO AP val')
- ax2.legend(loc='lower right')
- plt.savefig(str(Path(path).name) + '.png', dpi=300)
-
-
-def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
- # plot dataset labels
- print('Plotting labels... ')
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
- nc = int(c.max() + 1) # number of classes
- colors = color_list()
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
-
- # seaborn correlogram
- sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
- plt.close()
-
- # matplotlib labels
- matplotlib.use('svg') # faster
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- ax[0].set_ylabel('instances')
- if 0 < len(names) < 30:
- ax[0].set_xticks(range(len(names)))
- ax[0].set_xticklabels(names, rotation=90, fontsize=10)
- else:
- ax[0].set_xlabel('classes')
- sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
- sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
-
- # rectangles
- labels[:, 1:3] = 0.5 # center
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
- for cls, *box in labels[:1000]:
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
- ax[1].imshow(img)
- ax[1].axis('off')
-
- for a in [0, 1, 2, 3]:
- for s in ['top', 'right', 'left', 'bottom']:
- ax[a].spines[s].set_visible(False)
-
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
- matplotlib.use('Agg')
- plt.close()
-
- # loggers
- for k, v in loggers.items() or {}:
- if k == 'wandb' and v:
- v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
-
-
-def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
- # Plot hyperparameter evolution results in evolve.txt
- with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.SafeLoader)
- x = np.loadtxt('evolve.txt', ndmin=2)
- f = fitness(x)
- # weights = (f - f.min()) ** 2 # for weighted results
- plt.figure(figsize=(10, 12), tight_layout=True)
- matplotlib.rc('font', **{'size': 8})
- for i, (k, v) in enumerate(hyp.items()):
- y = x[:, i + 7]
- # mu = (y * weights).sum() / weights.sum() # best weighted result
- mu = y[f.argmax()] # best single result
- plt.subplot(6, 5, i + 1)
- plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
- plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
- if i % 5 != 0:
- plt.yticks([])
- print('%15s: %.3g' % (k, mu))
- plt.savefig('evolve.png', dpi=200)
- print('\nPlot saved as evolve.png')
-
-
-def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
- files = list(Path(save_dir).glob('frames*.txt'))
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
- n = results.shape[1] # number of rows
- x = np.arange(start, min(stop, n) if stop else n)
- results = results[:, x]
- t = (results[0] - results[0].min()) # set t0=0s
- results[0] = x
- for i, a in enumerate(ax):
- if i < len(results):
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
- a.set_title(s[i])
- a.set_xlabel('time (s)')
- # if fi == len(files) - 1:
- # a.set_ylim(bottom=0)
- for side in ['top', 'right']:
- a.spines[side].set_visible(False)
- else:
- a.remove()
- except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
-
- ax[1].legend()
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
-
-
-def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
- # Plot training 'results*.txt', overlaying train and val losses
- s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
- t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
- ax = ax.ravel()
- for i in range(5):
- for j in [i, i + 5]:
- y = results[j, x]
- ax[i].plot(x, y, marker='.', label=s[j])
- # y_smooth = butter_lowpass_filtfilt(y)
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
-
- ax[i].set_title(t[i])
- ax[i].legend()
- ax[i].set_ylabel(f) if i == 0 else None # add filename
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
- # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
- ax = ax.ravel()
- s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
- 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
- if bucket:
- # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
- files = ['results%g.txt' % x for x in id]
- c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
- os.system(c)
- else:
- files = list(Path(save_dir).glob('results*.txt'))
- assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- for i in range(10):
- y = results[i, x]
- if i in [0, 1, 2, 5, 6, 7]:
- y[y == 0] = np.nan # don't show zero loss values
- # y /= y[0] # normalize
- label = labels[fi] if len(labels) else f.stem
- ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
- ax[i].set_title(s[i])
- # if i in [5, 6, 7]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
-
- ax[1].legend()
- fig.savefig(Path(save_dir) / 'results.png', dpi=200)
-
-
-def output_to_keypoint(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
- targets = []
- for i, o in enumerate(output):
- kpts = o[:,6:]
- o = o[:,:6]
- for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()):
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])])
- return np.array(targets)
-
-
-def plot_skeleton_kpts(im, kpts, steps, orig_shape=None):
- # Plot the skeleton and keypoints for the COCO dataset
- palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
- [230, 230, 0], [255, 153, 255], [153, 204, 255],
- [255, 102, 255], [255, 51, 255], [102, 178, 255],
- [51, 153, 255], [255, 153, 153], [255, 102, 102],
- [255, 51, 51], [153, 255, 153], [102, 255, 102],
- [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0],
- [255, 255, 255]])
-
- skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
- [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
- [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
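- # Keypoint indices in `skeleton` are 1-based (COCO order); they are shifted to 0-based below.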
-
- pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]]
- pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]]
- radius = 5
- num_kpts = len(kpts) // steps
-
- for kid in range(num_kpts):
- r, g, b = pose_kpt_color[kid]
- x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1]
- if not (x_coord % 640 == 0 or y_coord % 640 == 0):
- if steps == 3:
- conf = kpts[steps * kid + 2]
- if conf < 0.5:
- continue
- cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1)
-
- for sk_id, sk in enumerate(skeleton):
- r, g, b = pose_limb_color[sk_id]
- pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1]))
- pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1]))
- if steps == 3:
- conf1 = kpts[(sk[0]-1)*steps+2]
- conf2 = kpts[(sk[1]-1)*steps+2]
- if conf1<0.5 or conf2<0.5:
- continue
- if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0:
- continue
- if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0:
- continue
- cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2)
diff --git a/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py b/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py
deleted file mode 100644
index fad7d90c82bd15c51372267c44f88c8349c9d612..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/steamship/invocable/invocable_response.py
+++ /dev/null
@@ -1,231 +0,0 @@
-from __future__ import annotations
-
-import io
-import json
-import logging
-from typing import Any, Dict, Generic, Optional, TypeVar, Union
-
-from pydantic import BaseModel
-from pydantic.generics import GenericModel
-
-from steamship.base import MimeTypes, SteamshipError, Task, TaskState
-from steamship.base.client import Client
-from steamship.base.error import DEFAULT_ERROR_MESSAGE
-from steamship.base.mime_types import ContentEncodings
-from steamship.base.model import CamelModel
-from steamship.utils.binary_utils import flexi_create
-
-
-class Http(CamelModel):
- status: int = None
- # If true, we're signaling to the Steamship Proxy that the `data` field of the SteamshipResponse object
- # has been wrapped in base64. In this situation, we can return the bytes within directly to the Proxy
- # caller without interpreting it.
- base64_wrapped: bool = None
- headers: Dict[str, str] = None
-
-
-T = TypeVar("T")
-
-
-class InvocableResponse(GenericModel, Generic[T]):
- """Mirrors the Response object in the Steamship server."""
-
- data: T = None # Data for successful or synchronous requests.
- status: Task = None # Reporting for errors and async status
- http: Http = None # Additional HTTP information for Steamship Proxy (headers, etc)
-
- def __init__(
- self,
- status: Task = None,
- error: SteamshipError = None,
- http: Http = None,
- data: Any = None,
- string: str = None,
- json: Any = None,
- _bytes: Union[bytes, io.BytesIO] = None,
- mime_type=None,
- ):
- super().__init__()
- # Note:
- # This function has to be very defensively coded since Any errors thrown here will not be returned
- # to the end-user via our proxy (as this is the constructor for the response itself!)
- if http is not None:
- self.http = http
- else:
- self.http = Http(status=200, headers={})
-
- try:
- self.set_data(data=data, string=string, json=json, _bytes=_bytes, mime_type=mime_type)
- except Exception as ex:
- logging.error("Exception within Response.__init__.", exc_info=ex)
- if error is not None:
- if error.message:
- error.message = f"{error.message}. Also found error - unable to serialize data to response. {ex}"
- else:
- error.message = f"Unable to serialize data to response. {ex}"
- else:
- error = SteamshipError(message=f"Unable to serialize data to response. {ex}")
- logging.error(error, exc_info=error)
-
- # Handle the task provided
- if status is None:
- self.status = Task()
- elif isinstance(status, Task):
- self.status = status
- else:
- self.status = Task()
- self.status.state = TaskState.failed
- self.status.status_message = (
- f"Status field of response should be of type Task. "
- f"Instead was of type {type(status)} and had value {status}."
- )
-
- if error:
- self.status.state = TaskState.failed
- self.status.status_message = error.message
- self.status.status_suggestion = error.suggestion
- self.status.status_code = error.code
- logging.error(
- "steamship.invocable.response - Response created with error.", exc_info=error
- )
- else:
- if self.status.state is None:
- self.status.state = TaskState.succeeded
-
- def set_data(
- self,
- data: Any = None,
- string: str = None,
- json: Any = None,
- _bytes: Union[bytes, io.BytesIO] = None,
- mime_type=None,
- ):
- data, mime_type, encoding = flexi_create(
- data=data, string=string, json=json, _bytes=_bytes, mime_type=mime_type
- )
-
- self.data = data
-
- self.http.headers = self.http.headers or {}
- self.http.headers["Content-Type"] = mime_type or MimeTypes.BINARY
-
- if encoding == ContentEncodings.BASE64:
- self.http.base64_wrapped = True
-
- @staticmethod
- def error(
- code: int,
- message: Optional[str] = None,
- error: Optional[SteamshipError] = None,
- exception: Optional[Exception] = None,
- prefix: Optional[str] = None,
- ) -> InvocableResponse[T]:
- """Merges a number of error channels into one unified Response object.
-
- Aggregates all possible messages into a single " | "-delimited error message.
-
- If the final resulting error message is non-null, prefixes with the provided `prefix`
- """
- # Use or create the return error
- error = error or SteamshipError()
-
- messages = []
- if error.message != DEFAULT_ERROR_MESSAGE:
- messages.append(error.message)
-
- # Set or append the additional message
- if message is not None and message not in messages:
- messages.append(message)
-
- # Set or append the exception
- if exception is not None:
- exception_str = f"{exception}"
- if exception_str not in messages:
- messages.append(exception_str)
-
- messages = [m.strip() for m in messages if m is not None and len(m.strip())]
- if len(messages) > 0:
- error.message = " | ".join(messages)
-
- # Finally, add the prefix if requested.
- if prefix and error.message:
- error.message = f"{prefix}{error.message}"
-
- return InvocableResponse(error=error, http=Http(status=code))
-
- @staticmethod
- def from_obj(obj: Any) -> InvocableResponse: # noqa: C901
- if obj is None:
- return InvocableResponse.error(500, "Handler provided no response.")
-
- if isinstance(obj, InvocableResponse):
- return obj
- elif isinstance(obj, SteamshipError):
- return InvocableResponse.error(500, error=obj)
- elif isinstance(obj, Exception):
- return InvocableResponse.error(500, error=SteamshipError(error=obj))
- elif isinstance(obj, io.BytesIO):
- return InvocableResponse(_bytes=obj)
- elif isinstance(obj, dict):
- return InvocableResponse(json=obj)
- elif isinstance(obj, list):
- return InvocableResponse(json=obj)
- elif isinstance(obj, str):
- return InvocableResponse(string=obj)
- elif isinstance(obj, (float, int, bool)):
- return InvocableResponse(json=obj)
- elif isinstance(obj, CamelModel):
- return InvocableResponse(json=obj.dict(by_alias=True))
- elif isinstance(obj, BaseModel):
- return InvocableResponse(json=obj.dict())
-
- return InvocableResponse.error(
- 500, message=f"Handler provided unknown response type: {type(obj)}"
- )
-
- def post_update(self, client: Client):
- """Pushes this response object to the corresponding Task on the Steamship Engine.
-
- Typically apps and plugins return their results to the Engine synchronously via HTTP.
- But sometimes that's not practical -- for example:
-
- - Microsoft's OCR endpoint returns a Job Token that can be exchanged for updates, and eventually a result.
- - Google's AutoML can take 20-30 minutes to train.
- - Fine-tuning BERT on ECS can take an arbitrarily long amount of time.
-
- In these cases, it can be useful for the package/plugin to occasionally post updates to the Engine outside
- of the Engine's initial synchronous request-response conversation.
- """
- if self.status is None or self.status.task_id is None:
- raise SteamshipError(
- message="An App/Plugin response can only be pushed to the Steamship Engine if "
- + "it is associated with a Task. Please set the `status.task_id` field."
- )
- if client is None:
- raise SteamshipError(
- message="Unable to push Response to Steamship: Associated client is None"
- )
-
- # Create a task object
- task = Task(client=client, task_id=self.status.task_id)
- update_fields = set()
-
- if self.status.state is not None:
- task.state = self.status.state
- update_fields.add("state")
-
- if self.status.status_message is not None:
- task.status_message = self.status.status_message
- update_fields.add("status_message")
-
- if self.status.status_suggestion is not None:
- task.status_suggestion = self.status.status_suggestion
- update_fields.add("status_suggestion")
-
- if self.data is not None:
- # This object itself should always be the output of the Training Task object.
- task.output = json.dumps(self.data)
- update_fields.add("output")
-
- task.post_update(fields=update_fields)
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html b/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html
deleted file mode 100644
index 9375071fbdfda7bfd622d7f7bd2dfdd0c494341b..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT/assets/html/appearance_switcher.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
- {label}
-
-
-
-
-
diff --git a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py b/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py
deleted file mode 100644
index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/losses.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import commons
-
-
-def feature_loss(fmap_r, fmap_g):
- loss = 0
- for dr, dg in zip(fmap_r, fmap_g):
- for rl, gl in zip(dr, dg):
- rl = rl.float().detach()
- gl = gl.float()
- loss += torch.mean(torch.abs(rl - gl))
-
- return loss * 2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
- loss = 0
- r_losses = []
- g_losses = []
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
- dr = dr.float()
- dg = dg.float()
- r_loss = torch.mean((1-dr)**2)
- g_loss = torch.mean(dg**2)
- loss += (r_loss + g_loss)
- r_losses.append(r_loss.item())
- g_losses.append(g_loss.item())
-
- return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
- loss = 0
- gen_losses = []
- for dg in disc_outputs:
- dg = dg.float()
- l = torch.mean((1-dg)**2)
- gen_losses.append(l)
- loss += l
-
- return loss, gen_losses
-
-
-def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
- """
- z_p, logs_q: [b, h, t_t]
- m_p, logs_p: [b, h, t_t]
- """
- z_p = z_p.float()
- logs_q = logs_q.float()
- m_p = m_p.float()
- logs_p = logs_p.float()
- z_mask = z_mask.float()
-
- kl = logs_p - logs_q - 0.5
- kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
- kl = torch.sum(kl * z_mask)
- l = kl / torch.sum(z_mask)
- return l
diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py
deleted file mode 100644
index ed6b02e21b2774da461e61606451eada5ebc9e18..0000000000000000000000000000000000000000
--- a/spaces/JohnnyPittt/audio-styling/deepafx_st/version.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-'''Version info'''
-
-short_version = '0.0'
-version = '0.0.1'
diff --git a/spaces/JunghunleePhD/testfordocker/setup-docker.bash b/spaces/JunghunleePhD/testfordocker/setup-docker.bash
deleted file mode 100644
index 9498b0f4e9928affd740faafd8099c35c5e6defe..0000000000000000000000000000000000000000
--- a/spaces/JunghunleePhD/testfordocker/setup-docker.bash
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-NAME="testfordocker"
-PORT=7860
-APP_PATH="/app"
-
-## 1. stop old container
-docker stop $NAME
-
-## 2. remove old container and images
-docker rm $NAME && docker rmi $NAME
-
-## 3. build new image
-docker build -t $NAME .
-
-## 4. run container
-docker run -itd --name $NAME -p $PORT:7860 -v "$(pwd)/app":$APP_PATH $NAME
-
-echo "CONTAINER IS RUNNING AS $NAME ON $PORT"
\ No newline at end of file
diff --git a/spaces/Kay2048/IKay/upcunet_v3.py b/spaces/Kay2048/IKay/upcunet_v3.py
deleted file mode 100644
index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000
--- a/spaces/Kay2048/IKay/upcunet_v3.py
+++ /dev/null
@@ -1,714 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-import os, sys
-import numpy as np
-
-root_path = os.path.abspath('.')
-sys.path.append(root_path)
-
-
-class SEBlock(nn.Module):
- def __init__(self, in_channels, reduction=8, bias=False):
- super(SEBlock, self).__init__()
- self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
- self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
- def forward(self, x):
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
- else:
- x0 = torch.mean(x, dim=(2, 3), keepdim=True)
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
- def forward_mean(self, x, x0):
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
-
-class UNetConv(nn.Module):
- def __init__(self, in_channels, mid_channels, out_channels, se):
- super(UNetConv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- )
- if se:
- self.seblock = SEBlock(out_channels, reduction=8, bias=True)
- else:
- self.seblock = None
-
- def forward(self, x):
- z = self.conv(x)
- if self.seblock is not None:
- z = self.seblock(z)
- return z
-
-
-class UNet1(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet1x3(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1x3, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet2(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet2, self).__init__()
-
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 64, 128, se=True)
- self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
- self.conv3 = UNetConv(128, 256, 128, se=True)
- self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
- self.conv4 = UNetConv(128, 64, 64, se=True)
- self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
-
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3(x3)
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4(x2 + x3)
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
- def forward_a(self, x): # conv2/3/4 each end with an SE block
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x2): # conv2/3/4 each end with an SE block
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3.conv(x3)
- return x3
-
- def forward_c(self, x2, x3): # conv2/3/4 each end with an SE block
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4.conv(x2 + x3)
- return x4
-
- def forward_d(self, x1, x4): # conv2/3/4 each end with an SE block
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
-
- class UpCunet2x(nn.Module): # seamless tiling, fully lossless
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet2x, self).__init__()
- self.unet1 = UNet1(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
- def forward(self, x, tile_mode): # 1.7G
- n, c, h0, w0 = x.shape
- if (tile_mode == 0): # no tiling
- ph = ((h0 - 1) // 2 + 1) * 2
- pw = ((w0 - 1) // 2 + 1) * 2
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # dimensions must be divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
- return x
- elif (tile_mode == 1): # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # must be divisible by 2 after halving, so round up to a multiple of 4 first
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # divisible by 2
- else:
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # must be divisible by 2 after halving, so round up to a multiple of 4 first
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # divisible by 2
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2): # halve both h and w
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
- elif (tile_mode == 3): # one third of h and w
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G
- elif (tile_mode == 4): # one quarter of h and w
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
- n, c, h, w = x.shape
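- # Tiled path: the SE blocks need channel means over the whole image, so each stage first
- # accumulates the mean across all tiles (se_mean*) and only then applies the SE gating
- # per tile via forward_mean(), keeping the output identical to the un-tiled path.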
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 36, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 36, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
- return res #
-
-
- class UpCunet3x(nn.Module): # seamless tiling, fully lossless
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet3x, self).__init__()
- self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
- def forward(self, x, tile_mode): # 1.7G
- n, c, h0, w0 = x.shape
- if (tile_mode == 0): # no tiling
- ph = ((h0 - 1) // 4 + 1) * 4
- pw = ((w0 - 1) // 4 + 1) * 4
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # dimensions must be divisible by 4
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
- return x
- elif (tile_mode == 1): # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # must be divisible by 4 after halving, so round up to a multiple of 8 first
- crop_size_h = (h0 - 1) // 4 * 4 + 4 # divisible by 4
- else:
- crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # must be divisible by 4 after halving, so round up to a multiple of 8 first
- crop_size_w = (w0 - 1) // 4 * 4 + 4 # divisible by 4
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2): # halve both h and w
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G
- elif (tile_mode == 3): # one third of h and w
- crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G
- elif (tile_mode == 4): # one quarter of h and w
- crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 28, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 28, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- opt_res_dict[i][j] = x_crop #
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
- return res
-
-
-class UpCunet4x(nn.Module): # perfect tiling, lossless throughout
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet4x, self).__init__()
- self.unet1 = UNet1(in_channels, 64, deconv=True)
- self.unet2 = UNet2(64, 64, deconv=False)
- self.ps = nn.PixelShuffle(2)
- self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
-
- def forward(self, x, tile_mode):
- n, c, h0, w0 = x.shape
- x00 = x
- if (tile_mode == 0): # no tiling
- ph = ((h0 - 1) // 2 + 1) * 2
- pw = ((w0 - 1) // 2 + 1) * 2
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # the padded size must be divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- x = self.conv_final(x)
- x = F.pad(x, (-1, -1, -1, -1))
- x = self.ps(x)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
- x += F.interpolate(x00, scale_factor=4, mode='nearest')
- return x
- elif (tile_mode == 1): # halve the longer side
- if (w0 >= h0):
- crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # must stay divisible by 2 after halving, so round up to a multiple of 4 first
- crop_size_h = (h0 - 1) // 2 * 2 + 2 # divisible by 2
- else:
- crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # must stay divisible by 2 after halving, so round up to a multiple of 4 first
- crop_size_w = (w0 - 1) // 2 * 2 + 2 # divisible by 2
- crop_size = (crop_size_h, crop_size_w) # 6.6G
- elif (tile_mode == 2): # halve both h and w
- crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G
- elif (tile_mode == 3): # both h and w to one third
- crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G
- elif (tile_mode == 4): # both h and w to one quarter
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
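-        # Worked example (illustrative, not part of the original file): for
-        # h0 = 1001 with tile_mode == 2, crop_size[0] = ((1000 // 4) * 4 + 4) // 2 = 502
-        # and ph = ((1000 // 502) + 1) * 502 = 1004, so the reflect pad below adds
-        # 19 + (ph - h0) = 22 rows at the bottom and 19 rows at the top.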
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 38, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 38, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
- x_crop = torch.add(x0, x1) # x0 is the final output of unet2
- x_crop = self.conv_final(x_crop)
- x_crop = F.pad(x_crop, (-1, -1, -1, -1))
- x_crop = self.ps(x_crop)
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
- res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
- res += F.interpolate(x00, scale_factor=4, mode='nearest')
- return res #
-
-
-class RealWaifuUpScaler(object):
- def __init__(self, scale, weight_path, half, device):
- weight = torch.load(weight_path, map_location="cpu")
- self.model = eval("UpCunet%sx" % scale)()
- if (half == True):
- self.model = self.model.half().to(device)
- else:
- self.model = self.model.to(device)
- self.model.load_state_dict(weight, strict=True)
- self.model.eval()
- self.half = half
- self.device = device
-
- def np2tensor(self, np_frame):
- if (self.half == False):
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
- else:
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
- def tensor2np(self, tensor):
- if (self.half == False):
- return (
- np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
- else:
- return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
- (1, 2, 0)))
-
- def __call__(self, frame, tile_mode):
- with torch.no_grad():
- tensor = self.np2tensor(frame)
- result = self.tensor2np(self.model(tensor, tile_mode))
- return result
-
-
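-def _example_upscale_single_image(img_path):
-    """Hypothetical single-image sketch (not part of the original file): upscale
-    one image 2x on CPU with the up2x weights used in __main__ below."""
-    import cv2  # cv2 is only imported inside __main__ in the original file
-    upscaler = RealWaifuUpScaler(2, "weights_v3/up2x-latest-denoise3x.pth",
-                                 half=False, device="cpu")
-    frame = cv2.imread(img_path)[:, :, ::-1]  # BGR -> RGB
-    result = upscaler(frame, tile_mode=2)[:, :, ::-1]  # back to BGR for cv2.imwrite
-    return result
-
-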
-if __name__ == "__main__":
- ###########inference_img
- import time, cv2, sys
- from time import time as ttime
-
- for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
- ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
- for tile_mode in [0, 1, 2, 3, 4]:
- upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
- input_dir = "%s/input_dir1" % root_path
- output_dir = "%s/opt-dir-all-test" % root_path
- os.makedirs(output_dir, exist_ok=True)
- for name in os.listdir(input_dir):
- print(name)
- tmp = name.split(".")
- inp_path = os.path.join(input_dir, name)
- suffix = tmp[-1]
- prefix = ".".join(tmp[:-1])
- tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- print(inp_path, tmp_path)
- # handle non-ASCII (e.g. Chinese) file paths
- # os.link(inp_path, tmp_path)  # use a hard link on Windows
- os.symlink(inp_path, tmp_path)  # use a symbolic link on Linux
- frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
- t0 = ttime()
- result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
- t1 = ttime()
- print(prefix, "done", t1 - t0)
- tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- cv2.imwrite(tmp_opt_path, result)
- n = 0
- while (1):
- if (n == 0):
- suffix = "_%sx_tile%s.png" % (scale, tile_mode)
- else:
- suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) #
- if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
- break
- else:
- n += 1
- final_opt_path = os.path.join(output_dir, prefix + suffix)
- os.rename(tmp_opt_path, final_opt_path)
- os.remove(tmp_path)
diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py
deleted file mode 100644
index 193c50a86a307bd69f52a0c3b89fb5368ed9a222..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/stable_diffusion/main.py
+++ /dev/null
@@ -1,744 +0,0 @@
-import argparse, os, sys, datetime, glob, importlib, csv
-import numpy as np
-import time
-import torch
-import torchvision
-import pytorch_lightning as pl
-
-from packaging import version
-from omegaconf import OmegaConf
-from torch.utils.data import random_split, DataLoader, Dataset, Subset
-from functools import partial
-from PIL import Image
-
-from pytorch_lightning import seed_everything
-from pytorch_lightning.trainer import Trainer
-from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
-from pytorch_lightning.utilities.distributed import rank_zero_only
-from pytorch_lightning.utilities import rank_zero_info
-
-from ldm.data.base import Txt2ImgIterableBaseDataset
-from ldm.util import instantiate_from_config
-
-
-def get_parser(**parser_kwargs):
- def str2bool(v):
- if isinstance(v, bool):
- return v
- if v.lower() in ("yes", "true", "t", "y", "1"):
- return True
- elif v.lower() in ("no", "false", "f", "n", "0"):
- return False
- else:
- raise argparse.ArgumentTypeError("Boolean value expected.")
-
- parser = argparse.ArgumentParser(**parser_kwargs)
- parser.add_argument(
- "-n",
- "--name",
- type=str,
- const=True,
- default="",
- nargs="?",
- help="postfix for logdir",
- )
- parser.add_argument(
- "-r",
- "--resume",
- type=str,
- const=True,
- default="",
- nargs="?",
- help="resume from logdir or checkpoint in logdir",
- )
- parser.add_argument(
- "-b",
- "--base",
- nargs="*",
- metavar="base_config.yaml",
- help="paths to base configs. Loaded from left-to-right. "
- "Parameters can be overwritten or added with command-line options of the form `--key value`.",
- default=list(),
- )
- parser.add_argument(
- "-t",
- "--train",
- type=str2bool,
- const=True,
- default=False,
- nargs="?",
- help="train",
- )
- parser.add_argument(
- "--no-test",
- type=str2bool,
- const=True,
- default=False,
- nargs="?",
- help="disable test",
- )
- parser.add_argument(
- "-p",
- "--project",
- help="name of new or path to existing project"
- )
- parser.add_argument(
- "-d",
- "--debug",
- type=str2bool,
- nargs="?",
- const=True,
- default=False,
- help="enable post-mortem debugging",
- )
- parser.add_argument(
- "-s",
- "--seed",
- type=int,
- default=23,
- help="seed for seed_everything",
- )
- parser.add_argument(
- "-f",
- "--postfix",
- type=str,
- default="",
- help="post-postfix for default name",
- )
- parser.add_argument(
- "-l",
- "--logdir",
- type=str,
- default="logs",
- help="directory for logging dat shit",
- )
- parser.add_argument(
- "--scale_lr",
- type=str2bool,
- nargs="?",
- const=True,
- default=True,
- help="scale base-lr by ngpu * batch_size * n_accumulate",
- )
- return parser
-
-
-def nondefault_trainer_args(opt):
- parser = argparse.ArgumentParser()
- parser = Trainer.add_argparse_args(parser)
- args = parser.parse_args([])
- return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
-
-
-class WrappedDataset(Dataset):
- """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
-
- def __init__(self, dataset):
- self.data = dataset
-
- def __len__(self):
- return len(self.data)
-
- def __getitem__(self, idx):
- return self.data[idx]
-
-
-def worker_init_fn(_):
- worker_info = torch.utils.data.get_worker_info()
-
- dataset = worker_info.dataset
- worker_id = worker_info.id
-
- if isinstance(dataset, Txt2ImgIterableBaseDataset):
- split_size = dataset.num_records // worker_info.num_workers
- # reset num_records to the true number to retain reliable length information
- dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
- current_id = np.random.choice(len(np.random.get_state()[1]), 1)
- return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
- else:
- return np.random.seed(np.random.get_state()[1][0] + worker_id)
-
-
-class DataModuleFromConfig(pl.LightningDataModule):
- def __init__(self, batch_size, train=None, validation=None, test=None, predict=None,
- wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False,
- shuffle_val_dataloader=False):
- super().__init__()
- self.batch_size = batch_size
- self.dataset_configs = dict()
- self.num_workers = num_workers if num_workers is not None else batch_size * 2
- self.use_worker_init_fn = use_worker_init_fn
- if train is not None:
- self.dataset_configs["train"] = train
- self.train_dataloader = self._train_dataloader
- if validation is not None:
- self.dataset_configs["validation"] = validation
- self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
- if test is not None:
- self.dataset_configs["test"] = test
- self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
- if predict is not None:
- self.dataset_configs["predict"] = predict
- self.predict_dataloader = self._predict_dataloader
- self.wrap = wrap
-
- def prepare_data(self):
- for data_cfg in self.dataset_configs.values():
- instantiate_from_config(data_cfg)
-
- def setup(self, stage=None):
- self.datasets = dict(
- (k, instantiate_from_config(self.dataset_configs[k]))
- for k in self.dataset_configs)
- if self.wrap:
- for k in self.datasets:
- self.datasets[k] = WrappedDataset(self.datasets[k])
-
- def _train_dataloader(self):
- is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
- if is_iterable_dataset or self.use_worker_init_fn:
- init_fn = worker_init_fn
- else:
- init_fn = None
- return DataLoader(self.datasets["train"], batch_size=self.batch_size,
- num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True,
- worker_init_fn=init_fn)
-
- def _val_dataloader(self, shuffle=False):
- if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
- init_fn = worker_init_fn
- else:
- init_fn = None
- return DataLoader(self.datasets["validation"],
- batch_size=self.batch_size,
- num_workers=self.num_workers,
- worker_init_fn=init_fn,
- shuffle=shuffle)
-
- def _test_dataloader(self, shuffle=False):
- is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
- if is_iterable_dataset or self.use_worker_init_fn:
- init_fn = worker_init_fn
- else:
- init_fn = None
-
- # do not shuffle dataloader for iterable dataset
- shuffle = shuffle and (not is_iterable_dataset)
-
- return DataLoader(self.datasets["test"], batch_size=self.batch_size,
- num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
-
- def _predict_dataloader(self, shuffle=False):
- if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
- init_fn = worker_init_fn
- else:
- init_fn = None
- return DataLoader(self.datasets["predict"], batch_size=self.batch_size,
- num_workers=self.num_workers, worker_init_fn=init_fn)
-
-
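-# Illustrative sketch (not part of the original script): constructing the data
-# module directly. The dataset target is a placeholder; real targets come from
-# the `data:` section of the yaml configs documented in __main__ below.
-#
-#   data = DataModuleFromConfig(
-#       batch_size=4, num_workers=8, wrap=False,
-#       train={"target": "some.module.SomeDataset", "params": {}})
-#   data.prepare_data()
-#   data.setup()
-#   train_loader = data.train_dataloader()
-
-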
-class SetupCallback(Callback):
- def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
- super().__init__()
- self.resume = resume
- self.now = now
- self.logdir = logdir
- self.ckptdir = ckptdir
- self.cfgdir = cfgdir
- self.config = config
- self.lightning_config = lightning_config
-
- def on_keyboard_interrupt(self, trainer, pl_module):
- if trainer.global_rank == 0:
- print("Summoning checkpoint.")
- ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
- trainer.save_checkpoint(ckpt_path)
-
- def on_pretrain_routine_start(self, trainer, pl_module):
- if trainer.global_rank == 0:
- # Create logdirs and save configs
- os.makedirs(self.logdir, exist_ok=True)
- os.makedirs(self.ckptdir, exist_ok=True)
- os.makedirs(self.cfgdir, exist_ok=True)
-
- if "callbacks" in self.lightning_config:
- if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
- os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
- print("Project config")
- print(OmegaConf.to_yaml(self.config))
- OmegaConf.save(self.config,
- os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
-
- print("Lightning config")
- print(OmegaConf.to_yaml(self.lightning_config))
- OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
- os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
-
- else:
- # ModelCheckpoint callback created log directory --- remove it
- if not self.resume and os.path.exists(self.logdir):
- dst, name = os.path.split(self.logdir)
- dst = os.path.join(dst, "child_runs", name)
- os.makedirs(os.path.split(dst)[0], exist_ok=True)
- try:
- os.rename(self.logdir, dst)
- except FileNotFoundError:
- pass
-
-
-class ImageLogger(Callback):
- def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True,
- rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
- log_images_kwargs=None):
- super().__init__()
- self.rescale = rescale
- self.batch_freq = batch_frequency
- self.max_images = max_images
- self.logger_log_images = {
- pl.loggers.TestTubeLogger: self._testtube,
- }
- self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
- if not increase_log_steps:
- self.log_steps = [self.batch_freq]
- self.clamp = clamp
- self.disabled = disabled
- self.log_on_batch_idx = log_on_batch_idx
- self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
- self.log_first_step = log_first_step
-
- @rank_zero_only
- def _testtube(self, pl_module, images, batch_idx, split):
- for k in images:
- grid = torchvision.utils.make_grid(images[k])
- grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
-
- tag = f"{split}/{k}"
- pl_module.logger.experiment.add_image(
- tag, grid,
- global_step=pl_module.global_step)
-
- @rank_zero_only
- def log_local(self, save_dir, split, images,
- global_step, current_epoch, batch_idx):
- root = os.path.join(save_dir, "images", split)
- for k in images:
- grid = torchvision.utils.make_grid(images[k], nrow=4)
- if self.rescale:
- grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
- grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
- grid = grid.numpy()
- grid = (grid * 255).astype(np.uint8)
- filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
- k,
- global_step,
- current_epoch,
- batch_idx)
- path = os.path.join(root, filename)
- os.makedirs(os.path.split(path)[0], exist_ok=True)
- Image.fromarray(grid).save(path)
-
- def log_img(self, pl_module, batch, batch_idx, split="train"):
- check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
- if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0
- hasattr(pl_module, "log_images") and
- callable(pl_module.log_images) and
- self.max_images > 0):
- logger = type(pl_module.logger)
-
- is_train = pl_module.training
- if is_train:
- pl_module.eval()
-
- with torch.no_grad():
- images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
-
- for k in images:
- N = min(images[k].shape[0], self.max_images)
- images[k] = images[k][:N]
- if isinstance(images[k], torch.Tensor):
- images[k] = images[k].detach().cpu()
- if self.clamp:
- images[k] = torch.clamp(images[k], -1., 1.)
-
- self.log_local(pl_module.logger.save_dir, split, images,
- pl_module.global_step, pl_module.current_epoch, batch_idx)
-
- logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
- logger_log_images(pl_module, images, pl_module.global_step, split)
-
- if is_train:
- pl_module.train()
-
- def check_frequency(self, check_idx):
- if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
- check_idx > 0 or self.log_first_step):
- try:
- self.log_steps.pop(0)
- except IndexError as e:
- print(e)
- pass
- return True
- return False
-
- def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
- if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
- self.log_img(pl_module, batch, batch_idx, split="train")
-
- def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
- if not self.disabled and pl_module.global_step > 0:
- self.log_img(pl_module, batch, batch_idx, split="val")
- if hasattr(pl_module, 'calibrate_grad_norm'):
- if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0:
- self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
-
-
-class CUDACallback(Callback):
- # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py
- def on_train_epoch_start(self, trainer, pl_module):
- # Reset the memory use counter
- torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
- torch.cuda.synchronize(trainer.root_gpu)
- self.start_time = time.time()
-
- def on_train_epoch_end(self, trainer, pl_module, outputs):
- torch.cuda.synchronize(trainer.root_gpu)
- max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / 2 ** 20
- epoch_time = time.time() - self.start_time
-
- try:
- max_memory = trainer.training_type_plugin.reduce(max_memory)
- epoch_time = trainer.training_type_plugin.reduce(epoch_time)
-
- rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds")
- rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB")
- except AttributeError:
- pass
-
-
-if __name__ == "__main__":
- # custom parser to specify config files, train, test and debug mode,
- # postfix, resume.
- # `--key value` arguments are interpreted as arguments to the trainer.
- # `nested.key=value` arguments are interpreted as config parameters.
- # configs are merged from left-to-right followed by command line parameters.
-
- # model:
- # base_learning_rate: float
- # target: path to lightning module
- # params:
- # key: value
- # data:
- # target: main.DataModuleFromConfig
- # params:
- # batch_size: int
- # wrap: bool
- # train:
- # target: path to train dataset
- # params:
- # key: value
- # validation:
- # target: path to validation dataset
- # params:
- # key: value
- # test:
- # target: path to test dataset
- # params:
- # key: value
- # lightning: (optional, has sane defaults and can be specified on cmdline)
- # trainer:
- # additional arguments to trainer
- # logger:
- # logger to instantiate
- # modelcheckpoint:
- # modelcheckpoint to instantiate
- # callbacks:
- # callback1:
- # target: importpath
- # params:
- # key: value
-
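-    # Illustrative example (not from the original script) of how the merge below
-    # behaves: later configs and dotlist overrides take precedence.
-    #
-    #   from omegaconf import OmegaConf
-    #   base = OmegaConf.create({"data": {"params": {"batch_size": 4}}})
-    #   cli = OmegaConf.from_dotlist(["data.params.batch_size=8"])
-    #   merged = OmegaConf.merge(base, cli)
-    #   assert merged.data.params.batch_size == 8
-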
- now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
-
- # add cwd for convenience and to make classes in this file available when
- # running as `python main.py`
- # (in particular `main.DataModuleFromConfig`)
- sys.path.append(os.getcwd())
-
- parser = get_parser()
- parser = Trainer.add_argparse_args(parser)
-
- opt, unknown = parser.parse_known_args()
- if opt.name and opt.resume:
- raise ValueError(
- "-n/--name and -r/--resume cannot be specified both."
- "If you want to resume training in a new log folder, "
- "use -n/--name in combination with --resume_from_checkpoint"
- )
- if opt.resume:
- if not os.path.exists(opt.resume):
- raise ValueError("Cannot find {}".format(opt.resume))
- if os.path.isfile(opt.resume):
- paths = opt.resume.split("/")
- # idx = len(paths)-paths[::-1].index("logs")+1
- # logdir = "/".join(paths[:idx])
- logdir = "/".join(paths[:-2])
- ckpt = opt.resume
- else:
- assert os.path.isdir(opt.resume), opt.resume
- logdir = opt.resume.rstrip("/")
- ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
-
- opt.resume_from_checkpoint = ckpt
- base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
- opt.base = base_configs + opt.base
- _tmp = logdir.split("/")
- nowname = _tmp[-1]
- else:
- if opt.name:
- name = "_" + opt.name
- elif opt.base:
- cfg_fname = os.path.split(opt.base[0])[-1]
- cfg_name = os.path.splitext(cfg_fname)[0]
- name = "_" + cfg_name
- else:
- name = ""
- nowname = now + name + opt.postfix
- logdir = os.path.join(opt.logdir, nowname)
-
- ckptdir = os.path.join(logdir, "checkpoints")
- cfgdir = os.path.join(logdir, "configs")
- seed_everything(opt.seed)
-
- try:
- # init and save configs
- configs = [OmegaConf.load(cfg) for cfg in opt.base]
- cli = OmegaConf.from_dotlist(unknown)
- config = OmegaConf.merge(*configs, cli)
- lightning_config = config.pop("lightning", OmegaConf.create())
- # merge trainer cli with config
- trainer_config = lightning_config.get("trainer", OmegaConf.create())
- # default to ddp
- trainer_config["accelerator"] = "ddp"
- for k in nondefault_trainer_args(opt):
- trainer_config[k] = getattr(opt, k)
- if not "gpus" in trainer_config:
- del trainer_config["accelerator"]
- cpu = True
- else:
- gpuinfo = trainer_config["gpus"]
- print(f"Running on GPUs {gpuinfo}")
- cpu = False
- trainer_opt = argparse.Namespace(**trainer_config)
- lightning_config.trainer = trainer_config
-
- # model
- model = instantiate_from_config(config.model)
-
- # trainer and callbacks
- trainer_kwargs = dict()
-
- # default logger configs
- default_logger_cfgs = {
- "wandb": {
- "target": "pytorch_lightning.loggers.WandbLogger",
- "params": {
- "name": nowname,
- "save_dir": logdir,
- "offline": opt.debug,
- "id": nowname,
- }
- },
- "testtube": {
- "target": "pytorch_lightning.loggers.TestTubeLogger",
- "params": {
- "name": "testtube",
- "save_dir": logdir,
- }
- },
- }
- default_logger_cfg = default_logger_cfgs["testtube"]
- if "logger" in lightning_config:
- logger_cfg = lightning_config.logger
- else:
- logger_cfg = OmegaConf.create()
- logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
- trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
-
- # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
- # specify which metric is used to determine best models
- default_modelckpt_cfg = {
- "target": "pytorch_lightning.callbacks.ModelCheckpoint",
- "params": {
- "dirpath": ckptdir,
- "filename": "{epoch:06}",
- "verbose": True,
- "save_last": True,
- }
- }
- if hasattr(model, "monitor"):
- print(f"Monitoring {model.monitor} as checkpoint metric.")
- default_modelckpt_cfg["params"]["monitor"] = model.monitor
- default_modelckpt_cfg["params"]["save_top_k"] = 3
-
- if "modelcheckpoint" in lightning_config:
- modelckpt_cfg = lightning_config.modelcheckpoint
- else:
- modelckpt_cfg = OmegaConf.create()
- modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
- print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
- if version.parse(pl.__version__) < version.parse('1.4.0'):
- trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
-
- # add callback which sets up log directory
- default_callbacks_cfg = {
- "setup_callback": {
- "target": "main.SetupCallback",
- "params": {
- "resume": opt.resume,
- "now": now,
- "logdir": logdir,
- "ckptdir": ckptdir,
- "cfgdir": cfgdir,
- "config": config,
- "lightning_config": lightning_config,
- }
- },
- "image_logger": {
- "target": "main.ImageLogger",
- "params": {
- "batch_frequency": 750,
- "max_images": 4,
- "clamp": True
- }
- },
- "learning_rate_logger": {
- "target": "main.LearningRateMonitor",
- "params": {
- "logging_interval": "step",
- # "log_momentum": True
- }
- },
- "cuda_callback": {
- "target": "main.CUDACallback"
- },
- }
- if version.parse(pl.__version__) >= version.parse('1.4.0'):
- default_callbacks_cfg.update({'checkpoint_callback': modelckpt_cfg})
-
- if "callbacks" in lightning_config:
- callbacks_cfg = lightning_config.callbacks
- else:
- callbacks_cfg = OmegaConf.create()
-
- if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg:
- print(
- 'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
- default_metrics_over_trainsteps_ckpt_dict = {
- 'metrics_over_trainsteps_checkpoint':
- {"target": 'pytorch_lightning.callbacks.ModelCheckpoint',
- 'params': {
- "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'),
- "filename": "{epoch:06}-{step:09}",
- "verbose": True,
- 'save_top_k': -1,
- 'every_n_train_steps': 10000,
- 'save_weights_only': True
- }
- }
- }
- default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
-
- callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
- if 'ignore_keys_callback' in callbacks_cfg and hasattr(trainer_opt, 'resume_from_checkpoint'):
- callbacks_cfg.ignore_keys_callback.params['ckpt_path'] = trainer_opt.resume_from_checkpoint
- elif 'ignore_keys_callback' in callbacks_cfg:
- del callbacks_cfg['ignore_keys_callback']
-
- trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
-
- trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
- trainer.logdir = logdir ###
-
- # data
- data = instantiate_from_config(config.data)
- # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
- # calling these ourselves should not be necessary but it is.
- # lightning still takes care of proper multiprocessing though
- data.prepare_data()
- data.setup()
- print("#### Data #####")
- for k in data.datasets:
- print(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")
-
- # configure learning rate
- bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
- if not cpu:
- ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
- else:
- ngpu = 1
- if 'accumulate_grad_batches' in lightning_config.trainer:
- accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
- else:
- accumulate_grad_batches = 1
- print(f"accumulate_grad_batches = {accumulate_grad_batches}")
- lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
- if opt.scale_lr:
- model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
- print(
- "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
- model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
- else:
- model.learning_rate = base_lr
- print("++++ NOT USING LR SCALING ++++")
- print(f"Setting learning rate to {model.learning_rate:.2e}")
-
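-    # Worked example (illustrative, not in the original script): with opt.scale_lr,
-    # 2 GPUs, batch_size=4, accumulate_grad_batches=2 and base_lr=1e-4, the model
-    # learning rate set above becomes 2 * 2 * 4 * 1e-4 = 1.6e-3.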
-
- # allow checkpointing via USR1
- def melk(*args, **kwargs):
- # run all checkpoint hooks
- if trainer.global_rank == 0:
- print("Summoning checkpoint.")
- ckpt_path = os.path.join(ckptdir, "last.ckpt")
- trainer.save_checkpoint(ckpt_path)
-
-
- def divein(*args, **kwargs):
- if trainer.global_rank == 0:
- import pudb;
- pudb.set_trace()
-
-
- import signal
-
- signal.signal(signal.SIGUSR1, melk)
- signal.signal(signal.SIGUSR2, divein)
-
- # run
- if opt.train:
- try:
- trainer.fit(model, data)
- except Exception:
- melk()
- raise
- if not opt.no_test and not trainer.interrupted:
- trainer.test(model, data)
- except Exception:
- if opt.debug and trainer.global_rank == 0:
- try:
- import pudb as debugger
- except ImportError:
- import pdb as debugger
- debugger.post_mortem()
- raise
- finally:
- # move newly created debug project to debug_runs
- if opt.debug and not opt.resume and trainer.global_rank == 0:
- dst, name = os.path.split(logdir)
- dst = os.path.join(dst, "debug_runs", name)
- os.makedirs(os.path.split(dst)[0], exist_ok=True)
- os.rename(logdir, dst)
- try:
- if trainer.global_rank == 0:
- print(trainer.profiler.summary())
- except:
- pass
diff --git a/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py b/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py
deleted file mode 100644
index 43862e43e663dc5b2053c0f784dfac98cb0bacb3..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Clone-Your-Voice/encoder/inference.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from encoder.params_data import *
-from encoder.model import SpeakerEncoder
-from encoder.audio import preprocess_wav # We want to expose this function from here
-from matplotlib import cm
-from encoder import audio
-from pathlib import Path
-import numpy as np
-import torch
-
-_model = None # type: SpeakerEncoder
-_device = None # type: torch.device
-
-
-def load_model(weights_fpath: Path, device=None):
- """
- Loads the model into memory. If this function is not explicitly called, it will be run on the
- first call to embed_frames() with the default weights file.
-
- :param weights_fpath: the path to saved model weights.
- :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The
- model will be loaded and will run on this device. Outputs will however always be on the cpu.
- If None, will default to your GPU if it's available, otherwise your CPU.
- """
- # TODO: I think the slow loading of the encoder might have something to do with the device it
- # was saved on. Worth investigating.
- global _model, _device
- if device is None:
- _device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- elif isinstance(device, str):
- _device = torch.device(device)
- _model = SpeakerEncoder(_device, torch.device("cpu"))
- checkpoint = torch.load(weights_fpath, _device)
- _model.load_state_dict(checkpoint["model_state"])
- _model.eval()
- print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"]))
-
-
-def is_loaded():
- return _model is not None
-
-
-def embed_frames_batch(frames_batch):
- """
- Computes embeddings for a batch of mel spectrograms.
-
- :param frames_batch: a batch of mel spectrograms as a numpy array of float32 of shape
- (batch_size, n_frames, n_channels)
- :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size)
- """
- if _model is None:
- raise Exception("Model was not loaded. Call load_model() before inference.")
-
- frames = torch.from_numpy(frames_batch).to(_device)
- embed = _model.forward(frames).detach().cpu().numpy()
- return embed
-
-
-def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames,
- min_pad_coverage=0.75, overlap=0.5):
- """
- Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain
- partial utterances of each. Both the waveform and the mel
- spectrogram slices are returned, so as to make each partial utterance waveform correspond to
- its spectrogram. This function assumes that the mel spectrogram parameters used are those
- defined in params_data.py.
-
- The returned ranges may be indexing further than the length of the waveform. It is
- recommended that you pad the waveform with zeros up to wave_slices[-1].stop.
-
- :param n_samples: the number of samples in the waveform
- :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial
- utterance
- :param min_pad_coverage: when reaching the last partial utterance, it may or may not have
- enough frames. If at least min_pad_coverage of partial_utterance_n_frames are present,
- then the last partial utterance will be considered, as if we padded the audio. Otherwise,
- it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial
- utterance, this parameter is ignored so that the function always returns at least 1 slice.
- :param overlap: by how much the partial utterance should overlap. If set to 0, the partial
- utterances are entirely disjoint.
- :return: the waveform slices and mel spectrogram slices as lists of array slices. Index
- respectively the waveform and the mel spectrogram with these slices to obtain the partial
- utterances.
- """
- assert 0 <= overlap < 1
- assert 0 < min_pad_coverage <= 1
-
- samples_per_frame = int((sampling_rate * mel_window_step / 1000))
- n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
- frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)
-
- # Compute the slices
- wav_slices, mel_slices = [], []
- steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)
- for i in range(0, steps, frame_step):
- mel_range = np.array([i, i + partial_utterance_n_frames])
- wav_range = mel_range * samples_per_frame
- mel_slices.append(slice(*mel_range))
- wav_slices.append(slice(*wav_range))
-
- # Evaluate whether extra padding is warranted or not
- last_wav_range = wav_slices[-1]
- coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
- if coverage < min_pad_coverage and len(mel_slices) > 1:
- mel_slices = mel_slices[:-1]
- wav_slices = wav_slices[:-1]
-
- return wav_slices, mel_slices
-
-
-def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs):
- """
- Computes an embedding for a single utterance.
-
- # TODO: handle multiple wavs to benefit from batching on GPU
- :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32
- :param using_partials: if True, then the utterance is split in partial utterances of
- partial_utterance_n_frames frames and the utterance embedding is computed from their
- normalized average. If False, the utterance is instead computed from feeding the entire
- spectrogram to the network.
- :param return_partials: if True, the partial embeddings will also be returned along with the
- wav slices that correspond to the partial embeddings.
- :param kwargs: additional arguments to compute_partial_slices()
- :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
- return_partials is True, the partial utterances as a numpy array of float32 of shape
- (n_partials, model_embedding_size) and the wav partials as a list of slices will also be
- returned. If using_partials is simultaneously set to False, both these values will be None
- instead.
- """
- # Process the entire utterance if not using partials
- if not using_partials:
- frames = audio.wav_to_mel_spectrogram(wav)
- embed = embed_frames_batch(frames[None, ...])[0]
- if return_partials:
- return embed, None, None
- return embed
-
- # Compute where to split the utterance into partials and pad if necessary
- wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs)
- max_wave_length = wave_slices[-1].stop
- if max_wave_length >= len(wav):
- wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
-
- # Split the utterance into partials
- frames = audio.wav_to_mel_spectrogram(wav)
- frames_batch = np.array([frames[s] for s in mel_slices])
- partial_embeds = embed_frames_batch(frames_batch)
-
- # Compute the utterance embedding from the partial embeddings
- raw_embed = np.mean(partial_embeds, axis=0)
- embed = raw_embed / np.linalg.norm(raw_embed, 2)
-
- if return_partials:
- return embed, partial_embeds, wave_slices
- return embed
-
-
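-def _example_embed_from_file(wav_fpath: Path, weights_fpath: Path) -> np.ndarray:
-    """Hypothetical end-to-end sketch (not part of the original module): load the
-    encoder once, preprocess a wav file and compute its utterance embedding."""
-    if not is_loaded():
-        load_model(weights_fpath)
-    wav = preprocess_wav(wav_fpath)
-    return embed_utterance(wav)
-
-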
-def embed_speaker(wavs, **kwargs):
- raise NotImplementedError()
-
-
-def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)):
- import matplotlib.pyplot as plt
- if ax is None:
- ax = plt.gca()
-
- if shape is None:
- height = int(np.sqrt(len(embed)))
- shape = (height, -1)
- embed = embed.reshape(shape)
-
- cmap = cm.get_cmap()
- mappable = ax.imshow(embed, cmap=cmap)
- cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
- sm = cm.ScalarMappable(cmap=cmap)
- sm.set_clim(*color_range)
-
- ax.set_xticks([]), ax.set_yticks([])
- ax.set_title(title)
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py
deleted file mode 100644
index ae9a31bbeb2a8f1da62b457363fa05031d21925a..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/dii_head.py
+++ /dev/null
@@ -1,422 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List
-
-import torch
-import torch.nn as nn
-from mmcv.cnn import build_activation_layer, build_norm_layer
-from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
-from mmengine.config import ConfigDict
-from mmengine.model import bias_init_with_prob
-from torch import Tensor
-
-from mmdet.models.losses import accuracy
-from mmdet.models.task_modules import SamplingResult
-from mmdet.models.utils import multi_apply
-from mmdet.registry import MODELS
-from mmdet.utils import ConfigType, OptConfigType, reduce_mean
-from .bbox_head import BBoxHead
-
-
-@MODELS.register_module()
-class DIIHead(BBoxHead):
- r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
- Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
-
- Args:
- num_classes (int): Number of class in dataset.
- Defaults to 80.
- num_ffn_fcs (int): The number of fully-connected
- layers in FFNs. Defaults to 2.
- num_heads (int): The number of attention heads in MultiheadAttention.
- Defaults to 8.
- num_cls_fcs (int): The number of fully-connected
- layers in classification subnet. Defaults to 1.
- num_reg_fcs (int): The number of fully-connected
- layers in regression subnet. Defaults to 3.
- feedforward_channels (int): The hidden dimension
- of FFNs. Defaults to 2048
- in_channels (int): Hidden_channels of MultiheadAttention.
- Defaults to 256.
- dropout (float): Probability of dropping a channel.
- Defaults to 0.0
- ffn_act_cfg (:obj:`ConfigDict` or dict): The activation config
- for FFNs.
- dynamic_conv_cfg (:obj:`ConfigDict` or dict): The convolution
- config for DynamicConv.
- loss_iou (:obj:`ConfigDict` or dict): The config for iou or
- giou loss.
- init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
- dict]): Initialization config dict. Defaults to None.
- """
-
- def __init__(self,
- num_classes: int = 80,
- num_ffn_fcs: int = 2,
- num_heads: int = 8,
- num_cls_fcs: int = 1,
- num_reg_fcs: int = 3,
- feedforward_channels: int = 2048,
- in_channels: int = 256,
- dropout: float = 0.0,
- ffn_act_cfg: ConfigType = dict(type='ReLU', inplace=True),
- dynamic_conv_cfg: ConfigType = dict(
- type='DynamicConv',
- in_channels=256,
- feat_channels=64,
- out_channels=256,
- input_feat_shape=7,
- act_cfg=dict(type='ReLU', inplace=True),
- norm_cfg=dict(type='LN')),
- loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0),
- init_cfg: OptConfigType = None,
- **kwargs) -> None:
- assert init_cfg is None, 'To prevent abnormal initialization ' \
- 'behavior, init_cfg is not allowed to be set'
- super().__init__(
- num_classes=num_classes,
- reg_decoded_bbox=True,
- reg_class_agnostic=True,
- init_cfg=init_cfg,
- **kwargs)
- self.loss_iou = MODELS.build(loss_iou)
- self.in_channels = in_channels
- self.fp16_enabled = False
- self.attention = MultiheadAttention(in_channels, num_heads, dropout)
- self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
-
- self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg)
- self.instance_interactive_conv_dropout = nn.Dropout(dropout)
- self.instance_interactive_conv_norm = build_norm_layer(
- dict(type='LN'), in_channels)[1]
-
- self.ffn = FFN(
- in_channels,
- feedforward_channels,
- num_ffn_fcs,
- act_cfg=ffn_act_cfg,
- dropout=dropout)
- self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
-
- self.cls_fcs = nn.ModuleList()
- for _ in range(num_cls_fcs):
- self.cls_fcs.append(
- nn.Linear(in_channels, in_channels, bias=False))
- self.cls_fcs.append(
- build_norm_layer(dict(type='LN'), in_channels)[1])
- self.cls_fcs.append(
- build_activation_layer(dict(type='ReLU', inplace=True)))
-
- # over load the self.fc_cls in BBoxHead
- if self.loss_cls.use_sigmoid:
- self.fc_cls = nn.Linear(in_channels, self.num_classes)
- else:
- self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
-
- self.reg_fcs = nn.ModuleList()
- for _ in range(num_reg_fcs):
- self.reg_fcs.append(
- nn.Linear(in_channels, in_channels, bias=False))
- self.reg_fcs.append(
- build_norm_layer(dict(type='LN'), in_channels)[1])
- self.reg_fcs.append(
- build_activation_layer(dict(type='ReLU', inplace=True)))
- # over load the self.fc_cls in BBoxHead
- self.fc_reg = nn.Linear(in_channels, 4)
-
- assert self.reg_class_agnostic, 'DIIHead only ' \
- 'supports `reg_class_agnostic=True` '
- assert self.reg_decoded_bbox, 'DIIHead only ' \
- 'supports `reg_decoded_bbox=True`'
-
- def init_weights(self) -> None:
- """Use xavier initialization for all weight parameter and set
- classification head bias as a specific value when use focal loss."""
- super().init_weights()
- for p in self.parameters():
- if p.dim() > 1:
- nn.init.xavier_uniform_(p)
- else:
- # adopt the default initialization for
- # the weight and bias of the layer norm
- pass
- if self.loss_cls.use_sigmoid:
- bias_init = bias_init_with_prob(0.01)
- nn.init.constant_(self.fc_cls.bias, bias_init)
-
- def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> tuple:
- """Forward function of Dynamic Instance Interactive Head.
-
- Args:
- roi_feat (Tensor): Roi-pooling features with shape
- (batch_size*num_proposals, feature_dimensions,
- pooling_h, pooling_w).
- proposal_feat (Tensor): Intermediate feature obtained from
- the diihead in the last stage, has shape
- (batch_size, num_proposals, feature_dimensions)
-
- Returns:
- tuple[Tensor]: Usually a tuple of classification scores
- and bbox prediction and an intermediate feature.
-
- - cls_scores (Tensor): Classification scores for
- all proposals, has shape
- (batch_size, num_proposals, num_classes).
- - bbox_preds (Tensor): Box energies / deltas for
- all proposals, has shape
- (batch_size, num_proposals, 4).
- - obj_feat (Tensor): Object feature before classification
- and regression subnet, has shape
- (batch_size, num_proposal, feature_dimensions).
- - attn_feats (Tensor): Intermediate feature.
- """
- N, num_proposals = proposal_feat.shape[:2]
-
- # Self attention
- proposal_feat = proposal_feat.permute(1, 0, 2)
- proposal_feat = self.attention_norm(self.attention(proposal_feat))
- attn_feats = proposal_feat.permute(1, 0, 2)
-
- # instance interactive
- proposal_feat = attn_feats.reshape(-1, self.in_channels)
- proposal_feat_iic = self.instance_interactive_conv(
- proposal_feat, roi_feat)
- proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
- proposal_feat_iic)
- obj_feat = self.instance_interactive_conv_norm(proposal_feat)
-
- # FFN
- obj_feat = self.ffn_norm(self.ffn(obj_feat))
-
- cls_feat = obj_feat
- reg_feat = obj_feat
-
- for cls_layer in self.cls_fcs:
- cls_feat = cls_layer(cls_feat)
- for reg_layer in self.reg_fcs:
- reg_feat = reg_layer(reg_feat)
-
- cls_score = self.fc_cls(cls_feat).view(
- N, num_proposals, self.num_classes
- if self.loss_cls.use_sigmoid else self.num_classes + 1)
- bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)
-
- return cls_score, bbox_delta, obj_feat.view(
- N, num_proposals, self.in_channels), attn_feats
-
- def loss_and_target(self,
- cls_score: Tensor,
- bbox_pred: Tensor,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigType,
- imgs_whwh: Tensor,
- concat: bool = True,
- reduction_override: str = None) -> dict:
- """Calculate the loss based on the features extracted by the DIIHead.
-
- Args:
- cls_score (Tensor): Classification prediction
- results of all class, has shape
- (batch_size * num_proposals_single_image, num_classes)
- bbox_pred (Tensor): Regression prediction results, has shape
- (batch_size * num_proposals_single_image, 4), the last
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- imgs_whwh (Tensor): Tensor with shape
- (batch_size, num_proposals, 4), the last
- dimension means
- [img_width, img_height, img_width, img_height].
- concat (bool): Whether to concatenate the results of all
- the images in a single batch. Defaults to True.
- reduction_override (str, optional): The reduction
- method used to override the original reduction
- method of the loss. Options are "none",
- "mean" and "sum". Defaults to None.
-
- Returns:
- dict: A dictionary of loss and targets components.
- The targets are only used for cascade rcnn.
- """
- cls_reg_targets = self.get_targets(
- sampling_results=sampling_results,
- rcnn_train_cfg=rcnn_train_cfg,
- concat=concat)
- (labels, label_weights, bbox_targets, bbox_weights) = cls_reg_targets
-
- losses = dict()
- bg_class_ind = self.num_classes
- # note: in Sparse R-CNN, num_gt == num_pos
- pos_inds = (labels >= 0) & (labels < bg_class_ind)
- num_pos = pos_inds.sum().float()
- avg_factor = reduce_mean(num_pos)
- if cls_score is not None:
- if cls_score.numel() > 0:
- losses['loss_cls'] = self.loss_cls(
- cls_score,
- labels,
- label_weights,
- avg_factor=avg_factor,
- reduction_override=reduction_override)
- losses['pos_acc'] = accuracy(cls_score[pos_inds],
- labels[pos_inds])
- if bbox_pred is not None:
- # 0~self.num_classes-1 are FG, self.num_classes is BG
- # do not perform bounding box regression for BG anymore.
- if pos_inds.any():
- pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
- 4)[pos_inds.type(torch.bool)]
- imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
- 4)[pos_inds.type(torch.bool)]
- losses['loss_bbox'] = self.loss_bbox(
- pos_bbox_pred / imgs_whwh,
- bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
- bbox_weights[pos_inds.type(torch.bool)],
- avg_factor=avg_factor)
- losses['loss_iou'] = self.loss_iou(
- pos_bbox_pred,
- bbox_targets[pos_inds.type(torch.bool)],
- bbox_weights[pos_inds.type(torch.bool)],
- avg_factor=avg_factor)
- else:
- losses['loss_bbox'] = bbox_pred.sum() * 0
- losses['loss_iou'] = bbox_pred.sum() * 0
- return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)
-
- def _get_targets_single(self, pos_inds: Tensor, neg_inds: Tensor,
- pos_priors: Tensor, neg_priors: Tensor,
- pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,
- cfg: ConfigDict) -> tuple:
- """Calculate the ground truth for proposals in the single image
- according to the sampling results.
-
- Almost the same as the implementation in `bbox_head`,
- except that we add pos_inds and neg_inds to select positive and
- negative samples instead of selecting the first num_pos
- as positive samples.
-
- Args:
- pos_inds (Tensor): Has a length equal to the number of
- positive samples and contains the indices of
- the positive samples in the original proposal set.
- neg_inds (Tensor): Has a length equal to the number of
- negative samples and contains the indices of
- the negative samples in the original proposal set.
- pos_priors (Tensor): Contains all the positive boxes,
- has shape (num_pos, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- neg_priors (Tensor): Contains all the negative boxes,
- has shape (num_neg, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_bboxes (Tensor): Contains gt_boxes for
- all positive samples, has shape (num_pos, 4),
- the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_labels (Tensor): Contains gt_labels for
- all positive samples, has shape (num_pos, ).
- cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals in a single image.
- Containing the following Tensors:
-
- - labels(Tensor): Gt_labels for all proposals, has
- shape (num_proposals,).
- - label_weights(Tensor): Labels_weights for all proposals, has
- shape (num_proposals,).
- - bbox_targets(Tensor):Regression target for all proposals, has
- shape (num_proposals, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- - bbox_weights(Tensor):Regression weights for all proposals,
- has shape (num_proposals, 4).
- """
- num_pos = pos_priors.size(0)
- num_neg = neg_priors.size(0)
- num_samples = num_pos + num_neg
-
- # original implementation uses new_zeros since BG are set to be 0
- # now use empty & fill because BG cat_id = num_classes,
- # FG cat_id = [0, num_classes-1]
- labels = pos_priors.new_full((num_samples, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = pos_priors.new_zeros(num_samples)
- bbox_targets = pos_priors.new_zeros(num_samples, 4)
- bbox_weights = pos_priors.new_zeros(num_samples, 4)
- if num_pos > 0:
- labels[pos_inds] = pos_gt_labels
- pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
- label_weights[pos_inds] = pos_weight
- if not self.reg_decoded_bbox:
- pos_bbox_targets = self.bbox_coder.encode(
- pos_priors, pos_gt_bboxes)
- else:
- pos_bbox_targets = pos_gt_bboxes
- bbox_targets[pos_inds, :] = pos_bbox_targets
- bbox_weights[pos_inds, :] = 1
- if num_neg > 0:
- label_weights[neg_inds] = 1.0
-
- return labels, label_weights, bbox_targets, bbox_weights
-
- def get_targets(self,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict,
- concat: bool = True) -> tuple:
- """Calculate the ground truth for all samples in a batch according to
- the sampling_results.
-
- Almost the same as the implementation in bbox_head, except that we pass
- additional parameters pos_inds_list and neg_inds_list to the
- `_get_targets_single` function.
-
- Args:
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- concat (bool): Whether to concatenate the results of all
- the images in a single batch.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals in a single image.
- Containing the following list of Tensors:
-
- - labels (list[Tensor],Tensor): Gt_labels for all
- proposals in a batch, each tensor in list has
- shape (num_proposals,) when `concat=False`, otherwise just
- a single tensor has shape (num_all_proposals,).
- - label_weights (list[Tensor]): Labels_weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals,) when `concat=False`, otherwise just a
- single tensor has shape (num_all_proposals,).
- - bbox_targets (list[Tensor],Tensor): Regression target
- for all proposals in a batch, each tensor in list has
- shape (num_proposals, 4) when `concat=False`, otherwise
- just a single tensor has shape (num_all_proposals, 4),
- the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- - bbox_weights (list[tensor],Tensor): Regression weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals, 4) when `concat=False`, otherwise just a
- single tensor has shape (num_all_proposals, 4).
- """
- pos_inds_list = [res.pos_inds for res in sampling_results]
- neg_inds_list = [res.neg_inds for res in sampling_results]
- pos_priors_list = [res.pos_priors for res in sampling_results]
- neg_priors_list = [res.neg_priors for res in sampling_results]
- pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
- pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
- labels, label_weights, bbox_targets, bbox_weights = multi_apply(
- self._get_targets_single,
- pos_inds_list,
- neg_inds_list,
- pos_priors_list,
- neg_priors_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- cfg=rcnn_train_cfg)
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bbox_targets = torch.cat(bbox_targets, 0)
- bbox_weights = torch.cat(bbox_weights, 0)
- return labels, label_weights, bbox_targets, bbox_weights
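-
-
-def _example_diihead_forward_shapes():
-    """Hypothetical smoke test (not part of the original file): run DIIHead on
-    random features to illustrate the tensor shapes documented in forward().
-    Assumes the mmdet registry can resolve the default `dynamic_conv_cfg` and the
-    loss configs inherited from BBoxHead."""
-    head = DIIHead(num_classes=80, in_channels=256)
-    roi_feat = torch.randn(2 * 100, 256, 7, 7)  # (bs * num_proposals, C, 7, 7)
-    proposal_feat = torch.randn(2, 100, 256)    # (bs, num_proposals, C)
-    cls_score, bbox_delta, obj_feat, attn_feats = head(roi_feat, proposal_feat)
-    # cls_score: (2, 100, 81) with the default softmax loss; bbox_delta: (2, 100, 4);
-    # obj_feat and attn_feats: (2, 100, 256)
-    return cls_score.shape, bbox_delta.shape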
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
deleted file mode 100644
index 1c888f1e78d60433bf0333c642cc2f89e6d95614..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
+++ /dev/null
@@ -1,622 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-from mmengine.config import ConfigDict
-from mmengine.structures import InstanceData
-from torch import Tensor, nn
-
-from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead
-from mmdet.models.task_modules.samplers import SamplingResult
-from mmdet.models.utils import empty_instances
-from mmdet.registry import MODELS
-from mmdet.structures.bbox import bbox_overlaps
-
-
-@MODELS.register_module()
-class MultiInstanceBBoxHead(BBoxHead):
- r"""Bbox head used in CrowdDet.
-
- .. code-block:: none
-
- /-> cls convs_1 -> cls fcs_1 -> cls_1
- |--
- | \-> reg convs_1 -> reg fcs_1 -> reg_1
- |
- | /-> cls convs_2 -> cls fcs_2 -> cls_2
- shared convs -> shared fcs |--
- | \-> reg convs_2 -> reg fcs_2 -> reg_2
- |
- | ...
- |
- | /-> cls convs_k -> cls fcs_k -> cls_k
- |--
- \-> reg convs_k -> reg fcs_k -> reg_k
-
-
- Args:
- num_instance (int): The number of branches after shared fcs.
- Defaults to 2.
- with_refine (bool): Whether to use refine module. Defaults to False.
- num_shared_convs (int): The number of shared convs. Defaults to 0.
- num_shared_fcs (int): The number of shared fcs. Defaults to 2.
- num_cls_convs (int): The number of cls convs. Defaults to 0.
- num_cls_fcs (int): The number of cls fcs. Defaults to 0.
- num_reg_convs (int): The number of reg convs. Defaults to 0.
- num_reg_fcs (int): The number of reg fcs. Defaults to 0.
- conv_out_channels (int): The number of conv out channels.
- Defaults to 256.
- fc_out_channels (int): The number of fc out channels. Defaults to 1024.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Defaults to None.
- """ # noqa: W605
-
- def __init__(self,
- num_instance: int = 2,
- with_refine: bool = False,
- num_shared_convs: int = 0,
- num_shared_fcs: int = 2,
- num_cls_convs: int = 0,
- num_cls_fcs: int = 0,
- num_reg_convs: int = 0,
- num_reg_fcs: int = 0,
- conv_out_channels: int = 256,
- fc_out_channels: int = 1024,
- init_cfg: Optional[Union[dict, ConfigDict]] = None,
- *args,
- **kwargs) -> None:
- super().__init__(*args, init_cfg=init_cfg, **kwargs)
- assert (num_shared_convs + num_shared_fcs + num_cls_convs +
- num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
- assert num_instance == 2, 'Currently only 2 instances are supported'
- if num_cls_convs > 0 or num_reg_convs > 0:
- assert num_shared_fcs == 0
- if not self.with_cls:
- assert num_cls_convs == 0 and num_cls_fcs == 0
- if not self.with_reg:
- assert num_reg_convs == 0 and num_reg_fcs == 0
- self.num_instance = num_instance
- self.num_shared_convs = num_shared_convs
- self.num_shared_fcs = num_shared_fcs
- self.num_cls_convs = num_cls_convs
- self.num_cls_fcs = num_cls_fcs
- self.num_reg_convs = num_reg_convs
- self.num_reg_fcs = num_reg_fcs
- self.conv_out_channels = conv_out_channels
- self.fc_out_channels = fc_out_channels
- self.with_refine = with_refine
-
- # add shared convs and fcs
- self.shared_convs, self.shared_fcs, last_layer_dim = \
- self._add_conv_fc_branch(
- self.num_shared_convs, self.num_shared_fcs, self.in_channels,
- True)
- self.shared_out_channels = last_layer_dim
- self.relu = nn.ReLU(inplace=True)
-
- if self.with_refine:
- refine_model_cfg = {
- 'type': 'Linear',
- 'in_features': self.shared_out_channels + 20,
- 'out_features': self.shared_out_channels
- }
- self.shared_fcs_ref = MODELS.build(refine_model_cfg)
- self.fc_cls_ref = nn.ModuleList()
- self.fc_reg_ref = nn.ModuleList()
-
- self.cls_convs = nn.ModuleList()
- self.cls_fcs = nn.ModuleList()
- self.reg_convs = nn.ModuleList()
- self.reg_fcs = nn.ModuleList()
- self.cls_last_dim = list()
- self.reg_last_dim = list()
- self.fc_cls = nn.ModuleList()
- self.fc_reg = nn.ModuleList()
- for k in range(self.num_instance):
- # add cls specific branch
- cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch(
- self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
- self.cls_convs.append(cls_convs)
- self.cls_fcs.append(cls_fcs)
- self.cls_last_dim.append(cls_last_dim)
-
- # add reg specific branch
- reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch(
- self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
- self.reg_convs.append(reg_convs)
- self.reg_fcs.append(reg_fcs)
- self.reg_last_dim.append(reg_last_dim)
-
- if self.num_shared_fcs == 0 and not self.with_avg_pool:
- if self.num_cls_fcs == 0:
- self.cls_last_dim *= self.roi_feat_area
- if self.num_reg_fcs == 0:
- self.reg_last_dim *= self.roi_feat_area
-
- if self.with_cls:
- if self.custom_cls_channels:
- cls_channels = self.loss_cls.get_cls_channels(
- self.num_classes)
- else:
- cls_channels = self.num_classes + 1
- cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy
- cls_predictor_cfg_.update(
- in_features=self.cls_last_dim[k],
- out_features=cls_channels)
- self.fc_cls.append(MODELS.build(cls_predictor_cfg_))
- if self.with_refine:
- self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_))
-
- if self.with_reg:
- out_dim_reg = (4 if self.reg_class_agnostic else 4 *
- self.num_classes)
- reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
- reg_predictor_cfg_.update(
- in_features=self.reg_last_dim[k], out_features=out_dim_reg)
- self.fc_reg.append(MODELS.build(reg_predictor_cfg_))
- if self.with_refine:
- self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_))
-
- if init_cfg is None:
- # when init_cfg is None,
- # It has been set to
- # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
- # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
- # after `super(ConvFCBBoxHead, self).__init__()`
- # we only need to append additional configuration
- # for `shared_fcs`, `cls_fcs` and `reg_fcs`
- self.init_cfg += [
- dict(
- type='Xavier',
- distribution='uniform',
- override=[
- dict(name='shared_fcs'),
- dict(name='cls_fcs'),
- dict(name='reg_fcs')
- ])
- ]
-
- def _add_conv_fc_branch(self,
- num_branch_convs: int,
- num_branch_fcs: int,
- in_channels: int,
- is_shared: bool = False) -> tuple:
- """Add shared or separable branch.
-
- convs -> avg pool (optional) -> fcs
- """
- last_layer_dim = in_channels
- # add branch specific conv layers
- branch_convs = nn.ModuleList()
- if num_branch_convs > 0:
- for i in range(num_branch_convs):
- conv_in_channels = (
- last_layer_dim if i == 0 else self.conv_out_channels)
- branch_convs.append(
- ConvModule(
- conv_in_channels, self.conv_out_channels, 3,
- padding=1))
- last_layer_dim = self.conv_out_channels
- # add branch specific fc layers
- branch_fcs = nn.ModuleList()
- if num_branch_fcs > 0:
- # for shared branch, only consider self.with_avg_pool
- # for separated branches, also consider self.num_shared_fcs
- if (is_shared
- or self.num_shared_fcs == 0) and not self.with_avg_pool:
- last_layer_dim *= self.roi_feat_area
- for i in range(num_branch_fcs):
- fc_in_channels = (
- last_layer_dim if i == 0 else self.fc_out_channels)
- branch_fcs.append(
- nn.Linear(fc_in_channels, self.fc_out_channels))
- last_layer_dim = self.fc_out_channels
- return branch_convs, branch_fcs, last_layer_dim
-
- def forward(self, x: Tuple[Tensor]) -> tuple:
- """Forward features from the upstream network.
-
- Args:
- x (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple: A tuple of classification scores and bbox prediction.
-
-                - cls_score (Tensor): Classification scores of all instance
-                  branches, concatenated along the channel dimension; shape
-                  (num_rois, num_instance * (num_classes + 1)).
-                - bbox_pred (Tensor): Box deltas of all instance branches,
-                  concatenated along the channel dimension; shape
-                  (num_rois, num_instance * 4) when `reg_class_agnostic=True`.
- - cls_score_ref (Tensor): The cls_score after refine model.
- - bbox_pred_ref (Tensor): The bbox_pred after refine model.
- """
- # shared part
- if self.num_shared_convs > 0:
- for conv in self.shared_convs:
- x = conv(x)
-
- if self.num_shared_fcs > 0:
- if self.with_avg_pool:
- x = self.avg_pool(x)
-
- x = x.flatten(1)
- for fc in self.shared_fcs:
- x = self.relu(fc(x))
-
- x_cls = x
- x_reg = x
- # separate branches
- cls_score = list()
- bbox_pred = list()
- for k in range(self.num_instance):
- for conv in self.cls_convs[k]:
- x_cls = conv(x_cls)
- if x_cls.dim() > 2:
- if self.with_avg_pool:
- x_cls = self.avg_pool(x_cls)
- x_cls = x_cls.flatten(1)
- for fc in self.cls_fcs[k]:
- x_cls = self.relu(fc(x_cls))
-
- for conv in self.reg_convs[k]:
- x_reg = conv(x_reg)
- if x_reg.dim() > 2:
- if self.with_avg_pool:
- x_reg = self.avg_pool(x_reg)
- x_reg = x_reg.flatten(1)
- for fc in self.reg_fcs[k]:
- x_reg = self.relu(fc(x_reg))
-
- cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None)
- bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None)
-
- if self.with_refine:
- x_ref = x
- cls_score_ref = list()
- bbox_pred_ref = list()
- for k in range(self.num_instance):
- feat_ref = cls_score[k].softmax(dim=-1)
- feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]),
- dim=1).repeat(1, 4)
- feat_ref = torch.cat((x_ref, feat_ref), dim=1)
- feat_ref = F.relu_(self.shared_fcs_ref(feat_ref))
-
- cls_score_ref.append(self.fc_cls_ref[k](feat_ref))
- bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref))
-
- cls_score = torch.cat(cls_score, dim=1)
- bbox_pred = torch.cat(bbox_pred, dim=1)
- cls_score_ref = torch.cat(cls_score_ref, dim=1)
- bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1)
- return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref
-
- cls_score = torch.cat(cls_score, dim=1)
- bbox_pred = torch.cat(bbox_pred, dim=1)
-
- return cls_score, bbox_pred
-
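
Shape-wise, the concatenation at the end of `forward` simply stacks the per-branch outputs along the channel dimension. A rough sketch with dummy tensors, assuming `num_instance=2`, one foreground class and `reg_class_agnostic=True` (the usual CrowdDet setting):

```python
import torch

num_rois, num_classes, num_instance = 512, 1, 2

# Dummy per-branch predictions (the real ones come from fc_cls[k] / fc_reg[k]).
cls_per_branch = [torch.rand(num_rois, num_classes + 1) for _ in range(num_instance)]
reg_per_branch = [torch.rand(num_rois, 4) for _ in range(num_instance)]

cls_score = torch.cat(cls_per_branch, dim=1)  # (512, num_instance * (num_classes + 1)) = (512, 4)
bbox_pred = torch.cat(reg_per_branch, dim=1)  # (512, num_instance * 4) = (512, 8)
print(cls_score.shape, bbox_pred.shape)
```
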
- def get_targets(self,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict,
- concat: bool = True) -> tuple:
- """Calculate the ground truth for all samples in a batch according to
- the sampling_results.
-
-        Almost the same as the implementation in bbox_head, except that
-        each proposal is matched with ``num_instance`` ground-truth boxes,
-        so the encoded targets are stacked along the last dimension.
-
- Args:
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- concat (bool): Whether to concatenate the results of all
- the images in a single batch.
-
- Returns:
-            Tuple[Tensor]: Ground-truth targets for the sampled proposals,
-            containing the following list of Tensors:
-
- - labels (list[Tensor],Tensor): Gt_labels for all proposals in a
- batch, each tensor in list has shape (num_proposals,) when
- `concat=False`, otherwise just a single tensor has shape
- (num_all_proposals,).
- - label_weights (list[Tensor]): Labels_weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals,) when `concat=False`, otherwise just a single
- tensor has shape (num_all_proposals,).
- - bbox_targets (list[Tensor],Tensor): Regression target for all
- proposals in a batch, each tensor in list has shape
- (num_proposals, 4) when `concat=False`, otherwise just a single
- tensor has shape (num_all_proposals, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
-            - bbox_weights (list[Tensor], Tensor): Regression weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals, 4) when `concat=False`, otherwise just a
- single tensor has shape (num_all_proposals, 4).
- """
- labels = []
- bbox_targets = []
- bbox_weights = []
- label_weights = []
- for i in range(len(sampling_results)):
- sample_bboxes = torch.cat([
- sampling_results[i].pos_gt_bboxes,
- sampling_results[i].neg_gt_bboxes
- ])
- sample_priors = sampling_results[i].priors
- sample_priors = sample_priors.repeat(1, self.num_instance).reshape(
- -1, 4)
- sample_bboxes = sample_bboxes.reshape(-1, 4)
-
- if not self.reg_decoded_bbox:
- _bbox_targets = self.bbox_coder.encode(sample_priors,
- sample_bboxes)
- else:
- _bbox_targets = sample_priors
- _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4)
- _bbox_weights = torch.ones(_bbox_targets.shape)
- _labels = torch.cat([
- sampling_results[i].pos_gt_labels,
- sampling_results[i].neg_gt_labels
- ])
- _labels_weights = torch.ones(_labels.shape)
-
- bbox_targets.append(_bbox_targets)
- bbox_weights.append(_bbox_weights)
- labels.append(_labels)
- label_weights.append(_labels_weights)
-
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bbox_targets = torch.cat(bbox_targets, 0)
- bbox_weights = torch.cat(bbox_weights, 0)
- return labels, label_weights, bbox_targets, bbox_weights
-
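
To see what the reshaping in `get_targets` does, here is a shape-only sketch with random boxes and a trivial stand-in for the box coder (the real head uses `self.bbox_coder.encode`):

```python
import torch

num_proposals, num_instance = 128, 2
priors = torch.rand(num_proposals, 4)
# Each proposal is matched with `num_instance` ground-truth boxes.
gt_boxes = torch.rand(num_proposals, num_instance * 4)

sample_priors = priors.repeat(1, num_instance).reshape(-1, 4)  # (256, 4)
sample_bboxes = gt_boxes.reshape(-1, 4)                        # (256, 4)
deltas = sample_bboxes - sample_priors                         # stand-in for bbox_coder.encode
bbox_targets = deltas.reshape(-1, num_instance * 4)            # (128, 8)
print(bbox_targets.shape)
```
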
- def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor,
- labels: Tensor, label_weights: Tensor, bbox_targets: Tensor,
- bbox_weights: Tensor, **kwargs) -> dict:
- """Calculate the loss based on the network predictions and targets.
-
- Args:
-            cls_score (Tensor): Classification prediction results of all classes,
- has shape (batch_size * num_proposals_single_image,
- (num_classes + 1) * k), k represents the number of prediction
- boxes generated by each proposal box.
- bbox_pred (Tensor): Regression prediction results, has shape
- (batch_size * num_proposals_single_image, 4 * k), the last
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
- rois (Tensor): RoIs with the shape
- (batch_size * num_proposals_single_image, 5) where the first
- column indicates batch id of each RoI.
- labels (Tensor): Gt_labels for all proposals in a batch, has
- shape (batch_size * num_proposals_single_image, k).
- label_weights (Tensor): Labels_weights for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image, k).
- bbox_targets (Tensor): Regression target for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image,
- 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,
- br_y].
- bbox_weights (Tensor): Regression weights for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image,
- 4 * k).
-
- Returns:
- dict: A dictionary of loss.
- """
- losses = dict()
- if bbox_pred.numel():
- loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2],
- bbox_pred[:, 4:8], cls_score[:, 2:4],
- bbox_targets, labels)
- loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4],
- bbox_pred[:, 0:4], cls_score[:, 0:2],
- bbox_targets, labels)
- loss = torch.cat([loss_0, loss_1], dim=1)
- _, min_indices = loss.min(dim=1)
- loss_emd = loss[torch.arange(loss.shape[0]), min_indices]
- loss_emd = loss_emd.mean()
- else:
- loss_emd = bbox_pred.sum()
- losses['loss_rcnn_emd'] = loss_emd
- return losses
-
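
The `loss` above implements the EMD (earth mover's distance) matching from CrowdDet: both ways of assigning the two predictions to the two targets are scored, and each proposal keeps only the cheaper assignment. A toy sketch with made-up per-assignment losses:

```python
import torch

# loss_0[i]: cost of the assignment (pred_0 -> gt_0, pred_1 -> gt_1) for proposal i.
# loss_1[i]: cost of the swapped assignment (pred_0 -> gt_1, pred_1 -> gt_0).
loss_0 = torch.tensor([[0.9], [0.2], [0.7]])
loss_1 = torch.tensor([[0.4], [0.6], [0.1]])

loss = torch.cat([loss_0, loss_1], dim=1)                  # (num_proposals, 2)
_, min_indices = loss.min(dim=1)                           # cheaper assignment per proposal
loss_emd = loss[torch.arange(loss.shape[0]), min_indices]
print(loss_emd)         # tensor([0.4000, 0.2000, 0.1000])
print(loss_emd.mean())  # tensor(0.2333)
```
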
- def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor,
- bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor,
- labels: Tensor) -> Tensor:
- """Calculate the emd loss.
-
- Note:
- This implementation is modified from https://github.com/Purkialo/
- CrowdDet/blob/master/lib/det_oprs/loss_opr.py
-
- Args:
- bbox_pred_0 (Tensor): Part of regression prediction results, has
- shape (batch_size * num_proposals_single_image, 4), the last
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
- cls_score_0 (Tensor): Part of classification prediction results,
- has shape (batch_size * num_proposals_single_image,
- (num_classes + 1)), where 1 represents the background.
- bbox_pred_1 (Tensor): The other part of regression prediction
- results, has shape (batch_size*num_proposals_single_image, 4).
-            cls_score_1 (Tensor): The other part of classification prediction
- results, has shape (batch_size * num_proposals_single_image,
- (num_classes + 1)).
-            targets (Tensor): Regression target for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image,
- 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,
- br_y], k represents the number of prediction boxes generated
- by each proposal box.
- labels (Tensor): Gt_labels for all proposals in a batch, has
- shape (batch_size * num_proposals_single_image, k).
-
- Returns:
- torch.Tensor: The calculated loss.
- """
-
- bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1],
- dim=1).reshape(-1, bbox_pred_0.shape[-1])
- cls_score = torch.cat([cls_score_0, cls_score_1],
- dim=1).reshape(-1, cls_score_0.shape[-1])
- targets = targets.reshape(-1, 4)
- labels = labels.long().flatten()
-
- # masks
- valid_masks = labels >= 0
- fg_masks = labels > 0
-
- # multiple class
- bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4)
- fg_gt_classes = labels[fg_masks]
- bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :]
-
- # loss for regression
- loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks])
- loss_bbox = loss_bbox.sum(dim=1)
-
- # loss for classification
- labels = labels * valid_masks
- loss_cls = self.loss_cls(cls_score, labels)
-
- loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox
- loss = loss_cls.reshape(-1, 2).sum(dim=1)
- return loss.reshape(-1, 1)
-
- def _predict_by_feat_single(
- self,
- roi: Tensor,
- cls_score: Tensor,
- bbox_pred: Tensor,
- img_meta: dict,
- rescale: bool = False,
- rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
- """Transform a single image's features extracted from the head into
- bbox results.
-
- Args:
-            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5);
-                the last dimension represents (batch_index, x1, y1, x2, y2).
- cls_score (Tensor): Box scores, has shape
- (num_boxes, num_classes + 1).
-            bbox_pred (Tensor): Box energies / deltas, with shape
- (num_boxes, num_classes * 4).
- img_meta (dict): image information.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
- rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
-                Defaults to None.
-
- Returns:
- :obj:`InstanceData`: Detection results of each image.
- Each item usually contains following keys.
-
- - scores (Tensor): Classification scores, has a shape
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape
- (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4),
- the last dimension 4 arrange as (x1, y1, x2, y2).
- """
-
- cls_score = cls_score.reshape(-1, self.num_classes + 1)
- bbox_pred = bbox_pred.reshape(-1, 4)
- roi = roi.repeat_interleave(self.num_instance, dim=0)
-
- results = InstanceData()
- if roi.shape[0] == 0:
- return empty_instances([img_meta],
- roi.device,
- task_type='bbox',
- instance_results=[results])[0]
-
- scores = cls_score.softmax(dim=-1) if cls_score is not None else None
- img_shape = img_meta['img_shape']
- bboxes = self.bbox_coder.decode(
- roi[..., 1:], bbox_pred, max_shape=img_shape)
-
- if rescale and bboxes.size(0) > 0:
- assert img_meta.get('scale_factor') is not None
- scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(
- (1, 2))
- bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
- bboxes.size()[0], -1)
-
- if rcnn_test_cfg is None:
- # This means that it is aug test.
- # It needs to return the raw results without nms.
- results.bboxes = bboxes
- results.scores = scores
- else:
- roi_idx = np.tile(
- np.arange(bboxes.shape[0] / self.num_instance)[:, None],
- (1, self.num_instance)).reshape(-1, 1)[:, 0]
- roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape(
- -1, 1)
- bboxes = torch.cat([bboxes, roi_idx], dim=1)
- det_bboxes, det_scores = self.set_nms(
- bboxes, scores[:, 1], rcnn_test_cfg.score_thr,
- rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img)
-
- results.bboxes = det_bboxes[:, :-1]
- results.scores = det_scores
- results.labels = torch.zeros_like(det_scores)
-
- return results
-
- @staticmethod
- def set_nms(bboxes: Tensor,
- scores: Tensor,
- score_thr: float,
- iou_threshold: float,
- max_num: int = -1) -> Tuple[Tensor, Tensor]:
- """NMS for multi-instance prediction. Please refer to
- https://github.com/Purkialo/CrowdDet for more details.
-
- Args:
- bboxes (Tensor): predict bboxes.
- scores (Tensor): The score of each predict bbox.
- score_thr (float): bbox threshold, bboxes with scores lower than it
- will not be considered.
- iou_threshold (float): IoU threshold to be considered as
- conflicted.
-            max_num (int, optional): If there are more than max_num bboxes
-                after NMS, only the top max_num will be kept. Defaults to -1.
-
- Returns:
- Tuple[Tensor, Tensor]: (bboxes, scores).
- """
-
- bboxes = bboxes[scores > score_thr]
- scores = scores[scores > score_thr]
-
- ordered_scores, order = scores.sort(descending=True)
- ordered_bboxes = bboxes[order]
- roi_idx = ordered_bboxes[:, -1]
-
- keep = torch.ones(len(ordered_bboxes)) == 1
- ruler = torch.arange(len(ordered_bboxes))
- while ruler.shape[0] > 0:
- basement = ruler[0]
- ruler = ruler[1:]
- idx = roi_idx[basement]
- # calculate the body overlap
- basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4)
- ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
- overlap = bbox_overlaps(basement_bbox, ruler_bbox)
- indices = torch.where(overlap > iou_threshold)[1]
- loc = torch.where(roi_idx[ruler][indices] == idx)
- # the mask won't change in the step
- mask = keep[ruler[indices][loc]]
- keep[ruler[indices]] = False
- keep[ruler[indices][loc][mask]] = True
- ruler[~keep[ruler]] = -1
- ruler = ruler[ruler > 0]
-
- keep = keep[order.sort()[1]]
- return bboxes[keep][:max_num, :], scores[keep][:max_num]
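
The distinctive part of `set_nms` is that boxes originating from the same proposal (identical `roi_idx`) never suppress each other, which is what lets CrowdDet keep heavily overlapping detections in crowded scenes. A simplified, single-step sketch of that rule (hand-rolled IoU helper; the real implementation iterates until no candidates remain):

```python
import torch


def pairwise_iou(a, b):
    """IoU between boxes a (M, 4) and b (N, 4) in (x1, y1, x2, y2) format."""
    lt = torch.max(a[:, None, :2], b[None, :, :2])
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)


# Box 0 and box 1 come from the SAME proposal; box 2 duplicates box 0 from another one.
boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [0., 0., 10., 10.]])
roi_idx = torch.tensor([0, 0, 1])

keep = torch.ones(3, dtype=torch.bool)
best = 0  # index of the highest-scoring box
overlaps = pairwise_iou(boxes[best:best + 1], boxes)[0]
for j in range(1, 3):
    if overlaps[j] > 0.5 and roi_idx[j] != roi_idx[best]:
        keep[j] = False  # suppressed only when it comes from a different proposal
print(keep)  # tensor([ True,  True, False])
```
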
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py
deleted file mode 100644
index d9514ae7bcfc1b7d5613fa0107e9bd087e13dd46..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/mask_heads/grid_head.py
+++ /dev/null
@@ -1,490 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, List, Tuple
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-from mmengine.config import ConfigDict
-from mmengine.model import BaseModule
-from mmengine.structures import InstanceData
-from torch import Tensor
-
-from mmdet.models.task_modules.samplers import SamplingResult
-from mmdet.registry import MODELS
-from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType
-
-
-@MODELS.register_module()
-class GridHead(BaseModule):
-    """Implementation of the grid head in `Grid R-CNN <https://arxiv.org/abs/1811.12030>`_.
-
- Args:
- grid_points (int): The number of grid points. Defaults to 9.
- num_convs (int): The number of convolution layers. Defaults to 8.
- roi_feat_size (int): RoI feature size. Default to 14.
- in_channels (int): The channel number of inputs features.
- Defaults to 256.
- conv_kernel_size (int): The kernel size of convolution layers.
- Defaults to 3.
- point_feat_channels (int): The number of channels of each point
- features. Defaults to 64.
-        class_agnostic (bool): Whether to use class-agnostic classification.
- If so, the output channels of logits will be 1. Defaults to False.
- loss_grid (:obj:`ConfigDict` or dict): Config of grid loss.
-        conv_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to
- construct and config conv layer.
- norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
- config norm layer.
- init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
- dict]): Initialization config dict.
- """
-
- def __init__(
- self,
- grid_points: int = 9,
- num_convs: int = 8,
- roi_feat_size: int = 14,
- in_channels: int = 256,
- conv_kernel_size: int = 3,
- point_feat_channels: int = 64,
- deconv_kernel_size: int = 4,
- class_agnostic: bool = False,
- loss_grid: ConfigType = dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15),
- conv_cfg: OptConfigType = None,
- norm_cfg: ConfigType = dict(type='GN', num_groups=36),
- init_cfg: MultiConfig = [
- dict(type='Kaiming', layer=['Conv2d', 'Linear']),
- dict(
- type='Normal',
- layer='ConvTranspose2d',
- std=0.001,
- override=dict(
- type='Normal',
- name='deconv2',
- std=0.001,
- bias=-np.log(0.99 / 0.01)))
- ]
- ) -> None:
- super().__init__(init_cfg=init_cfg)
- self.grid_points = grid_points
- self.num_convs = num_convs
- self.roi_feat_size = roi_feat_size
- self.in_channels = in_channels
- self.conv_kernel_size = conv_kernel_size
- self.point_feat_channels = point_feat_channels
- self.conv_out_channels = self.point_feat_channels * self.grid_points
- self.class_agnostic = class_agnostic
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
- assert self.conv_out_channels % norm_cfg['num_groups'] == 0
-
- assert self.grid_points >= 4
- self.grid_size = int(np.sqrt(self.grid_points))
- if self.grid_size * self.grid_size != self.grid_points:
- raise ValueError('grid_points must be a square number')
-
- # the predicted heatmap is half of whole_map_size
- if not isinstance(self.roi_feat_size, int):
-            raise ValueError('Only square RoIs are supported in Grid R-CNN')
- self.whole_map_size = self.roi_feat_size * 4
-
- # compute point-wise sub-regions
- self.sub_regions = self.calc_sub_regions()
-
- self.convs = []
- for i in range(self.num_convs):
- in_channels = (
- self.in_channels if i == 0 else self.conv_out_channels)
- stride = 2 if i == 0 else 1
- padding = (self.conv_kernel_size - 1) // 2
- self.convs.append(
- ConvModule(
- in_channels,
- self.conv_out_channels,
- self.conv_kernel_size,
- stride=stride,
- padding=padding,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- bias=True))
- self.convs = nn.Sequential(*self.convs)
-
- self.deconv1 = nn.ConvTranspose2d(
- self.conv_out_channels,
- self.conv_out_channels,
- kernel_size=deconv_kernel_size,
- stride=2,
- padding=(deconv_kernel_size - 2) // 2,
- groups=grid_points)
- self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
- self.deconv2 = nn.ConvTranspose2d(
- self.conv_out_channels,
- grid_points,
- kernel_size=deconv_kernel_size,
- stride=2,
- padding=(deconv_kernel_size - 2) // 2,
- groups=grid_points)
-
- # find the 4-neighbor of each grid point
- self.neighbor_points = []
- grid_size = self.grid_size
- for i in range(grid_size): # i-th column
- for j in range(grid_size): # j-th row
- neighbors = []
- if i > 0: # left: (i - 1, j)
- neighbors.append((i - 1) * grid_size + j)
- if j > 0: # up: (i, j - 1)
- neighbors.append(i * grid_size + j - 1)
- if j < grid_size - 1: # down: (i, j + 1)
- neighbors.append(i * grid_size + j + 1)
- if i < grid_size - 1: # right: (i + 1, j)
- neighbors.append((i + 1) * grid_size + j)
- self.neighbor_points.append(tuple(neighbors))
- # total edges in the grid
- self.num_edges = sum([len(p) for p in self.neighbor_points])
-
- self.forder_trans = nn.ModuleList() # first-order feature transition
- self.sorder_trans = nn.ModuleList() # second-order feature transition
- for neighbors in self.neighbor_points:
- fo_trans = nn.ModuleList()
- so_trans = nn.ModuleList()
- for _ in range(len(neighbors)):
- # each transition module consists of a 5x5 depth-wise conv and
- # 1x1 conv.
- fo_trans.append(
- nn.Sequential(
- nn.Conv2d(
- self.point_feat_channels,
- self.point_feat_channels,
- 5,
- stride=1,
- padding=2,
- groups=self.point_feat_channels),
- nn.Conv2d(self.point_feat_channels,
- self.point_feat_channels, 1)))
- so_trans.append(
- nn.Sequential(
- nn.Conv2d(
- self.point_feat_channels,
- self.point_feat_channels,
- 5,
- 1,
- 2,
- groups=self.point_feat_channels),
- nn.Conv2d(self.point_feat_channels,
- self.point_feat_channels, 1)))
- self.forder_trans.append(fo_trans)
- self.sorder_trans.append(so_trans)
-
- self.loss_grid = MODELS.build(loss_grid)
-
- def forward(self, x: Tensor) -> Dict[str, Tensor]:
-        """Forward function of ``GridHead``.
-
- Args:
- x (Tensor): RoI features, has shape
- (num_rois, num_channels, roi_feat_size, roi_feat_size).
-
- Returns:
- Dict[str, Tensor]: Return a dict including fused and unfused
- heatmap.
- """
- assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
- # RoI feature transformation, downsample 2x
- x = self.convs(x)
-
- c = self.point_feat_channels
- # first-order fusion
- x_fo = [None for _ in range(self.grid_points)]
- for i, points in enumerate(self.neighbor_points):
- x_fo[i] = x[:, i * c:(i + 1) * c]
- for j, point_idx in enumerate(points):
- x_fo[i] = x_fo[i] + self.forder_trans[i][j](
- x[:, point_idx * c:(point_idx + 1) * c])
-
- # second-order fusion
- x_so = [None for _ in range(self.grid_points)]
- for i, points in enumerate(self.neighbor_points):
- x_so[i] = x[:, i * c:(i + 1) * c]
- for j, point_idx in enumerate(points):
- x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
-
- # predicted heatmap with fused features
- x2 = torch.cat(x_so, dim=1)
- x2 = self.deconv1(x2)
- x2 = F.relu(self.norm1(x2), inplace=True)
- heatmap = self.deconv2(x2)
-
- # predicted heatmap with original features (applicable during training)
- if self.training:
- x1 = x
- x1 = self.deconv1(x1)
- x1 = F.relu(self.norm1(x1), inplace=True)
- heatmap_unfused = self.deconv2(x1)
- else:
- heatmap_unfused = heatmap
-
- return dict(fused=heatmap, unfused=heatmap_unfused)
-
- def calc_sub_regions(self) -> List[Tuple[float]]:
- """Compute point specific representation regions.
-
-        See `Grid R-CNN Plus <https://arxiv.org/abs/1906.05688>`_ for details.
- """
- # to make it consistent with the original implementation, half_size
- # is computed as 2 * quarter_size, which is smaller
- half_size = self.whole_map_size // 4 * 2
- sub_regions = []
- for i in range(self.grid_points):
- x_idx = i // self.grid_size
- y_idx = i % self.grid_size
- if x_idx == 0:
- sub_x1 = 0
- elif x_idx == self.grid_size - 1:
- sub_x1 = half_size
- else:
- ratio = x_idx / (self.grid_size - 1) - 0.25
- sub_x1 = max(int(ratio * self.whole_map_size), 0)
-
- if y_idx == 0:
- sub_y1 = 0
- elif y_idx == self.grid_size - 1:
- sub_y1 = half_size
- else:
- ratio = y_idx / (self.grid_size - 1) - 0.25
- sub_y1 = max(int(ratio * self.whole_map_size), 0)
- sub_regions.append(
- (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
- return sub_regions
-
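
Plugging the defaults into `calc_sub_regions` makes the layout concrete: with `roi_feat_size=14` the heatmap is 56x56, each sub-region is 28x28, and for a 3x3 grid the per-axis start offsets are 0, 14 and 28. The sketch below reproduces the loop above for one axis:

```python
whole_map_size = 14 * 4              # 56, from roi_feat_size=14
half_size = whole_map_size // 4 * 2  # 28
grid_size = 3

starts = []
for idx in range(grid_size):
    if idx == 0:
        starts.append(0)
    elif idx == grid_size - 1:
        starts.append(half_size)
    else:
        ratio = idx / (grid_size - 1) - 0.25
        starts.append(max(int(ratio * whole_map_size), 0))
print(starts)  # [0, 14, 28] -> sub-regions (0-28), (14-42), (28-56) along each axis
```
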
- def get_targets(self, sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict) -> Tensor:
- """Calculate the ground truth for all samples in a batch according to
-        the sampling_results.
-
- Args:
- sampling_results (List[:obj:`SamplingResult`]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN.
-
- Returns:
- Tensor: Grid heatmap targets.
- """
- # mix all samples (across images) together.
- pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
- dim=0).cpu()
- pos_gt_bboxes = torch.cat(
- [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
- assert pos_bboxes.shape == pos_gt_bboxes.shape
-
- # expand pos_bboxes to 2x of original size
- x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
- y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
- x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
- y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
- pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
- pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
- pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
-
- num_rois = pos_bboxes.shape[0]
- map_size = self.whole_map_size
- # this is not the final target shape
- targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
- dtype=torch.float)
-
- # pre-compute interpolation factors for all grid points.
- # the first item is the factor of x-dim, and the second is y-dim.
- # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
- factors = []
- for j in range(self.grid_points):
- x_idx = j // self.grid_size
- y_idx = j % self.grid_size
- factors.append((1 - x_idx / (self.grid_size - 1),
- 1 - y_idx / (self.grid_size - 1)))
-
- radius = rcnn_train_cfg.pos_radius
- radius2 = radius**2
- for i in range(num_rois):
- # ignore small bboxes
- if (pos_bbox_ws[i] <= self.grid_size
- or pos_bbox_hs[i] <= self.grid_size):
- continue
- # for each grid point, mark a small circle as positive
- for j in range(self.grid_points):
- factor_x, factor_y = factors[j]
- gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
- 1 - factor_x) * pos_gt_bboxes[i, 2]
- gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
- 1 - factor_y) * pos_gt_bboxes[i, 3]
-
- cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
- map_size)
- cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
- map_size)
-
- for x in range(cx - radius, cx + radius + 1):
- for y in range(cy - radius, cy + radius + 1):
- if x >= 0 and x < map_size and y >= 0 and y < map_size:
- if (x - cx)**2 + (y - cy)**2 <= radius2:
- targets[i, j, y, x] = 1
- # reduce the target heatmap size by a half
- # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
- sub_targets = []
- for i in range(self.grid_points):
- sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
- sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
- sub_targets = torch.cat(sub_targets, dim=1)
- sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
- return sub_targets
-
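
A small numeric sketch of how one grid-point target is placed by `get_targets` (all boxes made up; 3x3 grid, `map_size=56`): the point is interpolated between the ground-truth corners and then expressed in the coordinate frame of the 2x-expanded proposal, where a circle of radius `pos_radius` around it is set to 1.

```python
# 2x-expanded proposal and its ground-truth box, both as (x1, y1, x2, y2).
pos_bbox = (0.0, 0.0, 80.0, 80.0)
pos_gt_bbox = (20.0, 20.0, 60.0, 60.0)
map_size, grid_size = 56, 3

j = 4  # centre point of the 3x3 grid
factor_x = 1 - (j // grid_size) / (grid_size - 1)  # 0.5
factor_y = 1 - (j % grid_size) / (grid_size - 1)   # 0.5
gx = factor_x * pos_gt_bbox[0] + (1 - factor_x) * pos_gt_bbox[2]  # 40.0
gy = factor_y * pos_gt_bbox[1] + (1 - factor_y) * pos_gt_bbox[3]  # 40.0

cx = int((gx - pos_bbox[0]) / (pos_bbox[2] - pos_bbox[0]) * map_size)  # 28
cy = int((gy - pos_bbox[1]) / (pos_bbox[3] - pos_bbox[1]) * map_size)  # 28
print(cx, cy)  # the target heatmap gets a circle of radius pos_radius around (28, 28)
```
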
- def loss(self, grid_pred: Tensor, sample_idx: Tensor,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict) -> dict:
- """Calculate the loss based on the features extracted by the grid head.
-
- Args:
- grid_pred (dict[str, Tensor]): Outputs of grid_head forward.
- sample_idx (Tensor): The sampling index of ``grid_pred``.
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
-
- Returns:
- dict: A dictionary of loss and targets components.
- """
- grid_targets = self.get_targets(sampling_results, rcnn_train_cfg)
- grid_targets = grid_targets[sample_idx]
-
- loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
- loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
- loss_grid = loss_fused + loss_unfused
- return dict(loss_grid=loss_grid)
-
- def predict_by_feat(self,
- grid_preds: Dict[str, Tensor],
- results_list: List[InstanceData],
- batch_img_metas: List[dict],
- rescale: bool = False) -> InstanceList:
- """Adjust the predicted bboxes from bbox head.
-
- Args:
-            grid_preds (dict[str, Tensor]): Dictionary output by the forward
-                function.
- results_list (list[:obj:`InstanceData`]): Detection results of
- each image.
- batch_img_metas (list[dict]): List of image information.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
-
- Returns:
- list[:obj:`InstanceData`]: Detection results of each image
- after the post process. Each item usually contains following keys.
-
- - scores (Tensor): Classification scores, has a shape \
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4), the last \
- dimension 4 arrange as (x1, y1, x2, y2).
- """
- num_roi_per_img = tuple(res.bboxes.size(0) for res in results_list)
- grid_preds = {
- k: v.split(num_roi_per_img, 0)
- for k, v in grid_preds.items()
- }
-
- for i, results in enumerate(results_list):
- if len(results) != 0:
- bboxes = self._predict_by_feat_single(
- grid_pred=grid_preds['fused'][i],
- bboxes=results.bboxes,
- img_meta=batch_img_metas[i],
- rescale=rescale)
- results.bboxes = bboxes
- return results_list
-
- def _predict_by_feat_single(self,
- grid_pred: Tensor,
- bboxes: Tensor,
- img_meta: dict,
- rescale: bool = False) -> Tensor:
- """Adjust ``bboxes`` according to ``grid_pred``.
-
- Args:
- grid_pred (Tensor): Grid fused heatmap.
- bboxes (Tensor): Predicted bboxes, has shape (n, 4)
- img_meta (dict): image information.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
-
- Returns:
-            Tensor: Adjusted bboxes.
- """
- assert bboxes.size(0) == grid_pred.size(0)
- grid_pred = grid_pred.sigmoid()
-
- R, c, h, w = grid_pred.shape
- half_size = self.whole_map_size // 4 * 2
- assert h == w == half_size
- assert c == self.grid_points
-
- # find the point with max scores in the half-sized heatmap
- grid_pred = grid_pred.view(R * c, h * w)
- pred_scores, pred_position = grid_pred.max(dim=1)
- xs = pred_position % w
- ys = pred_position // w
-
- # get the position in the whole heatmap instead of half-sized heatmap
- for i in range(self.grid_points):
- xs[i::self.grid_points] += self.sub_regions[i][0]
- ys[i::self.grid_points] += self.sub_regions[i][1]
-
- # reshape to (num_rois, grid_points)
- pred_scores, xs, ys = tuple(
- map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
-
- # get expanded pos_bboxes
- widths = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(-1)
- heights = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(-1)
- x1 = (bboxes[:, 0, None] - widths / 2)
- y1 = (bboxes[:, 1, None] - heights / 2)
- # map the grid point to the absolute coordinates
- abs_xs = (xs.float() + 0.5) / w * widths + x1
- abs_ys = (ys.float() + 0.5) / h * heights + y1
-
- # get the grid points indices that fall on the bbox boundaries
- x1_inds = [i for i in range(self.grid_size)]
- y1_inds = [i * self.grid_size for i in range(self.grid_size)]
- x2_inds = [
- self.grid_points - self.grid_size + i
- for i in range(self.grid_size)
- ]
- y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
-
- # voting of all grid points on some boundary
- bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
- dim=1, keepdim=True) / (
- pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
- bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
- dim=1, keepdim=True) / (
- pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
- bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
- dim=1, keepdim=True) / (
- pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
- bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
- dim=1, keepdim=True) / (
- pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
-
- bboxes = torch.cat([bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2], dim=1)
- bboxes[:, [0, 2]].clamp_(min=0, max=img_meta['img_shape'][1])
- bboxes[:, [1, 3]].clamp_(min=0, max=img_meta['img_shape'][0])
-
- if rescale:
- assert img_meta.get('scale_factor') is not None
- bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(
- (1, 2))
-
- return bboxes
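
The final box coordinates come from a confidence-weighted vote over the grid points lying on each edge. A toy example for the left edge (x1) with three boundary points (all values made up):

```python
import torch

# Absolute x-coordinates predicted by the three left-edge grid points of one RoI,
# together with their heatmap confidences.
abs_x = torch.tensor([[10.0, 12.0, 11.0]])
scores = torch.tensor([[0.9, 0.3, 0.6]])

x1 = (abs_x * scores).sum(dim=1, keepdim=True) / scores.sum(dim=1, keepdim=True)
print(x1)  # tensor([[10.6667]]); higher-confidence points pull the edge towards them
```
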
diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py
deleted file mode 100644
index 4c0806f5b20bdf732ff752fe0937550484870f4b..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpl/models/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from .builder import build_pler
-from .pler import *
-from .backbones import *
-from .losses import *
-from .heads import *
-from .necks import *
-from .data_preprocessors import *
-
-__all__ = ['build_pler']
\ No newline at end of file
diff --git a/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py b/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py
deleted file mode 100644
index f00993861bfb4f35fb7d145198f81c5e9f0a5993..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpl/structures/multi_task_data_sample.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-
-from mmengine.structures import BaseDataElement
-
-
-class MultiTaskDataSample(BaseDataElement):
-
- @property
- def tasks(self):
- return self._data_fields
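
A hedged usage sketch of `MultiTaskDataSample` (the field names are made up): `BaseDataElement` records every public attribute you set as a data field, so the `tasks` property simply reports which per-task samples have been attached.

```python
from mmengine.structures import BaseDataElement


class MultiTaskDataSample(BaseDataElement):

    @property
    def tasks(self):
        return self._data_fields


sample = MultiTaskDataSample()
sample.classification = BaseDataElement(label=1)  # hypothetical per-task payloads
sample.segmentation = BaseDataElement()
print(sample.tasks)  # {'classification', 'segmentation'}
```
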
diff --git a/spaces/Lavanya30/hiddenhunger/pages/human.py b/spaces/Lavanya30/hiddenhunger/pages/human.py
deleted file mode 100644
index f73b2a86ab38f2794e3a57bc20e6004c0589e1d4..0000000000000000000000000000000000000000
--- a/spaces/Lavanya30/hiddenhunger/pages/human.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import cv2
-import numpy as np
-import streamlit as st
-import tensorflow as tf
-from tensorflow.keras.preprocessing import image
-from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2,preprocess_input as mobilenet_v2_preprocess_input
-st.header("Hidden Hunger")
-st.write("To find micronutrient deficiency in human using the images of nails and eyes")
-st.markdown("""
-
-""", unsafe_allow_html=True)
-uploaded_file = st.file_uploader("Choose an image file", type="jpg")
-model = tf.keras.models.load_model(r"models/resnet152v2nail.h5")
-camera=st.button("Capture")
-if camera:
- # Function to process each frame of the video
- def process_frame(frame):
- # Convert the frame to grayscale
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
- # Apply a threshold to the grayscale image
- _, thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
-
- return thresh
-
-# Create a VideoCapture object to capture video from the camera
- cap = cv2.VideoCapture(0)
-
-# Set the dimensions of the video capture window
- cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
- cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
-
-# Define a function to capture video and display the results in the Streamlit app
- def capture_video():
- while True:
- # Read a frame from the camera
- ret, frame = cap.read()
-
- # Process the frame
- processed_frame = process_frame(frame)
-
- # Display the original and processed frames in the Streamlit app
- st.image(np.hstack((frame, processed_frame)), width=640)
-
- # Check if the user has pressed the "Stop" button
- if st.button('Stop'):
- break
-
-# Call the function to capture video and display the results in the Streamlit app
- capture_video()
-
-# Release the VideoCapture object and close the window
- cap.release()
- cv2.destroyAllWindows()
-map_dict = {0: 'Iodine deficiency',
- 1: 'Vitamin B12 deficiency',
- 2: 'Vitamin D deficiency',
- 3: 'Zinc deficiency',
- 4: 'Healthy',
- 5: 'Iron deficiency'}
-
-if uploaded_file is not None:
- # Convert the file to an opencv image.
- file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
- opencv_image = cv2.imdecode(file_bytes, 1)
- opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)
- resized = cv2.resize(opencv_image,(224,224))
- # Now do something with the image! For example, let's display it:
- st.image(opencv_image, channels="RGB")
-
- resized = mobilenet_v2_preprocess_input(resized)
- img_reshape = resized[np.newaxis,...]
-
- Genrate_pred = st.button("Generate Prediction")
- if Genrate_pred:
- prediction = model.predict(img_reshape).argmax()
-        st.title("Predicted Label for the image is {}".format(map_dict[prediction]))
-
diff --git a/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx b/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx
deleted file mode 100644
index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/components/tone-selector.tsx
+++ /dev/null
@@ -1,43 +0,0 @@
-import React from 'react'
-import { BingConversationStyle } from '@/lib/bots/bing/types'
-import { cn } from '@/lib/utils'
-
-type ToneItem = {
- type: BingConversationStyle,
- name: string
-}
-
-const ToneList: ToneItem[] = [
- { name: '有创造力', type: BingConversationStyle.Creative },
- { name: '更平衡', type: BingConversationStyle.Balanced },
- { name: '更精确', type: BingConversationStyle.Precise }
-]
-
-interface ToneSelectorProps {
- type: BingConversationStyle | ''
- onChange?: (type: BingConversationStyle) => void
-}
-
-export function ToneSelector({ type, onChange }: ToneSelectorProps) {
- return (
-
- Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
- LaTeX rendering / Table rendering / Code highlighting
- Auto dark mode / Adaptive web interface / WeChat-like theme
- Multi-parameters tuning / Multi-API-Key support / Multi-user support
- Compatible with GPT-4 / Local deployment for LLMs
-
-
-## Usage Tips
-
-- To better control ChatGPT, use the System Prompt.
-- To use a Prompt Template, select the Prompt Template Collection file first, and then choose a prompt from the drop-down menu.
-- To try again if the response is unsatisfactory, use the `🔄 Regenerate` button.
-- To start a new line in the input box, press Shift + Enter keys.
-- To quickly switch between input history, press ↑ and ↓ key in the input box.
-- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`.
-- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please note that the program must be running in order to be accessible via a public link.
-- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
-
-## Installation
-
-```shell
-git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
-cd ChuanhuChatGPT
-pip install -r requirements.txt
-```
-
-Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file.
-
-```shell
-python ChuanhuChatbot.py
-```
-
-A browser window will open and you will be able to chat with ChatGPT.
-
-> **Note**
->
-> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
-
-## Troubleshooting
-
-When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows:
-
-1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
- ```shell
- git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
- ```
-2. Try installing the dependencies again (as this project may have introduced new dependencies)
- ```
- pip install -r requirements.txt
- ```
-3. Update Gradio
- ```
- pip install gradio --upgrade --force-reinstall
- ```
-
-Generally, you can solve most problems by following these steps.
-
-If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
-
-This page lists almost all the possible problems and solutions. Please read it carefully.
-
-## More Information
-
-More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
-
-- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
-- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
-- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
-- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
-- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
-
-## Starchart
-
-[Star History Chart](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
-
-## Contributors
-
-
-
-
-
-## Sponsor
-
-🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~
-
-
-
-
diff --git a/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py b/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py
deleted file mode 100644
index ebdb5095a0691dadeebfbd16dfdfeb5fa95a0400..0000000000000000000000000000000000000000
--- a/spaces/Mackiemetal/dreamlike-photoreal-2.0/app.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'dreamlike-art/dreamlike-photoreal-2.0'
-prefix = ''
-
-scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-
-
-
Dreamlike Photoreal 2.0
-
-
- Demo for Dreamlike Photoreal 2.0 Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
diff --git a/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile b/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile
deleted file mode 100644
index 4cb0ce42128d9a2ad33a395883f5e5455a38c707..0000000000000000000000000000000000000000
--- a/spaces/Moxxie-nolastname/Not-Moxxie-Proxy/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18-bullseye-slim
-RUN apt-get update && \
- apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
\ No newline at end of file
diff --git a/spaces/Mradul/mlrc-bana/README.md b/spaces/Mradul/mlrc-bana/README.md
deleted file mode 100644
index 9dd04df84cbf56fccb03cb401ba1b76db674f792..0000000000000000000000000000000000000000
--- a/spaces/Mradul/mlrc-bana/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Mlrc Bana
-emoji: 🐢
-colorFrom: pink
-colorTo: pink
-sdk: streamlit
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/MuhammedAyman29/Fruits/app.py b/spaces/MuhammedAyman29/Fruits/app.py
deleted file mode 100644
index ee4563bb0f0a92906a5d8b39d34410258e1e2883..0000000000000000000000000000000000000000
--- a/spaces/MuhammedAyman29/Fruits/app.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pathlib
-temp = pathlib.WindowsPath
-pathlib.WindowsPath = pathlib.PosixPath
-
-import gradio as gr
-from fastai.vision.all import *
-import skimage
-
-
-learn= load_learner('export.pkl')
-def predict(img):
- #img = PILImage.create(img)
- pred,pred_idx,probs = learn.predict(img)
- return {labels[i]: float(probs[i]) for i in range(len(labels))}
-
-examples = ['Apple.jpg', 'banana.jpg']
-labels = learn.dls.vocab
-title = "Fruits Classifier"
-description = "Fruits classifier with fastai. Created as a demo for Gradio and HuggingFace Spaces."
-interpretation='default'
-enable_queue=True
-
-
-gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(192, 192)),outputs=gr.outputs.Label(num_top_classes=4),title=title,description=description,interpretation=interpretation,enable_queue=enable_queue).launch()
\ No newline at end of file
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py
deleted file mode 100644
index 2faf6ea2cfb9f0d71d0a79dff101e0408fa41778..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/tf2_encoder_checkpoint_converter.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""A converter from a V1 BERT encoder checkpoint to a V2 encoder checkpoint.
-
-The conversion will yield an object-oriented checkpoint that can be used
-to restore a TransformerEncoder object.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-
-from absl import app
-from absl import flags
-
-import tensorflow as tf
-from official.modeling import activations
-from official.nlp.bert import configs
-from official.nlp.bert import tf1_checkpoint_converter_lib
-from official.nlp.modeling import networks
-
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string("bert_config_file", None,
- "Bert configuration file to define core bert layers.")
-flags.DEFINE_string(
- "checkpoint_to_convert", None,
- "Initial checkpoint from a pretrained BERT model core (that is, only the "
- "BertModel, with no task heads.)")
-flags.DEFINE_string("converted_checkpoint_path", None,
- "Name for the created object-based V2 checkpoint.")
-
-
-def _create_bert_model(cfg):
- """Creates a BERT keras core model from BERT configuration.
-
- Args:
- cfg: A `BertConfig` to create the core model.
- Returns:
-    A TransformerEncoder network.
- """
- bert_encoder = networks.TransformerEncoder(
- vocab_size=cfg.vocab_size,
- hidden_size=cfg.hidden_size,
- num_layers=cfg.num_hidden_layers,
- num_attention_heads=cfg.num_attention_heads,
- intermediate_size=cfg.intermediate_size,
- activation=activations.gelu,
- dropout_rate=cfg.hidden_dropout_prob,
- attention_dropout_rate=cfg.attention_probs_dropout_prob,
- sequence_length=cfg.max_position_embeddings,
- type_vocab_size=cfg.type_vocab_size,
- initializer=tf.keras.initializers.TruncatedNormal(
- stddev=cfg.initializer_range),
- embedding_width=cfg.embedding_size)
-
- return bert_encoder
-
-
-def convert_checkpoint(bert_config, output_path, v1_checkpoint):
- """Converts a V1 checkpoint into an OO V2 checkpoint."""
- output_dir, _ = os.path.split(output_path)
-
- # Create a temporary V1 name-converted checkpoint in the output directory.
- temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1")
- temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt")
- tf1_checkpoint_converter_lib.convert(
- checkpoint_from_path=v1_checkpoint,
- checkpoint_to_path=temporary_checkpoint,
- num_heads=bert_config.num_attention_heads,
- name_replacements=tf1_checkpoint_converter_lib.BERT_V2_NAME_REPLACEMENTS,
- permutations=tf1_checkpoint_converter_lib.BERT_V2_PERMUTATIONS,
- exclude_patterns=["adam", "Adam"])
-
- # Create a V2 checkpoint from the temporary checkpoint.
- model = _create_bert_model(bert_config)
- tf1_checkpoint_converter_lib.create_v2_checkpoint(model, temporary_checkpoint,
- output_path)
-
- # Clean up the temporary checkpoint, if it exists.
- try:
- tf.io.gfile.rmtree(temporary_checkpoint_dir)
- except tf.errors.OpError:
- # If it doesn't exist, we don't need to clean it up; continue.
- pass
-
-
-def main(_):
- output_path = FLAGS.converted_checkpoint_path
- v1_checkpoint = FLAGS.checkpoint_to_convert
- bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
- convert_checkpoint(bert_config, output_path, v1_checkpoint)
-
-
-if __name__ == "__main__":
- app.run(main)
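
After conversion, the object-based checkpoint is meant to be restored into a freshly built encoder. A hedged sketch of that round trip (the file paths are placeholders, and the assumption that the encoder is stored under a `model` attribute mirrors how the converter wraps it; adjust if the layout differs):

```python
import tensorflow as tf

from official.nlp.bert import configs

bert_config = configs.BertConfig.from_json_file("bert_config.json")  # placeholder path
encoder = _create_bert_model(bert_config)  # reuses the helper defined above

# Assumption: the converted checkpoint stores the encoder as `model`.
checkpoint = tf.train.Checkpoint(model=encoder)
status = checkpoint.restore("converted/bert-v2.ckpt")  # placeholder path
status.assert_existing_objects_matched()
```
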
diff --git a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py b/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py
deleted file mode 100644
index 9541ee3f8bb4c65fb1f69070fa3876ee51b6c191..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_test.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Test NCF data pipeline."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import defaultdict
-import hashlib
-import os
-
-import mock
-import numpy as np
-import scipy.stats
-import tensorflow as tf
-
-from official.recommendation import constants as rconst
-from official.recommendation import data_preprocessing
-from official.recommendation import movielens
-from official.recommendation import popen_helper
-
-
-DATASET = "ml-test"
-NUM_USERS = 1000
-NUM_ITEMS = 2000
-NUM_PTS = 50000
-BATCH_SIZE = 2048
-EVAL_BATCH_SIZE = 4000
-NUM_NEG = 4
-
-
-END_TO_END_TRAIN_MD5 = "b218738e915e825d03939c5e305a2698"
-END_TO_END_EVAL_MD5 = "d753d0f3186831466d6e218163a9501e"
-FRESH_RANDOMNESS_MD5 = "63d0dff73c0e5f1048fbdc8c65021e22"
-
-
-def mock_download(*args, **kwargs):
- return
-
-
-# The forkpool used by data producers interacts badly with the threading
-# used by TestCase. Without this patch tests will hang, and no amount
-# of diligent closing and joining within the producer will prevent it.
-@mock.patch.object(popen_helper, "get_forkpool", popen_helper.get_fauxpool)
-class BaseTest(tf.test.TestCase):
-
- def setUp(self):
- tf.compat.v1.disable_eager_execution()
- self.temp_data_dir = self.get_temp_dir()
- ratings_folder = os.path.join(self.temp_data_dir, DATASET)
- tf.io.gfile.makedirs(ratings_folder)
- np.random.seed(0)
- raw_user_ids = np.arange(NUM_USERS * 3)
- np.random.shuffle(raw_user_ids)
- raw_user_ids = raw_user_ids[:NUM_USERS]
-
- raw_item_ids = np.arange(NUM_ITEMS * 3)
- np.random.shuffle(raw_item_ids)
- raw_item_ids = raw_item_ids[:NUM_ITEMS]
-
- users = np.random.choice(raw_user_ids, NUM_PTS)
- items = np.random.choice(raw_item_ids, NUM_PTS)
- scores = np.random.randint(low=0, high=5, size=NUM_PTS)
- times = np.random.randint(low=1000000000, high=1200000000, size=NUM_PTS)
-
- self.rating_file = os.path.join(ratings_folder, movielens.RATINGS_FILE)
- self.seen_pairs = set()
- self.holdout = {}
- with tf.io.gfile.GFile(self.rating_file, "w") as f:
- f.write("user_id,item_id,rating,timestamp\n")
- for usr, itm, scr, ts in zip(users, items, scores, times):
- pair = (usr, itm)
- if pair in self.seen_pairs:
- continue
- self.seen_pairs.add(pair)
- if usr not in self.holdout or (ts, itm) > self.holdout[usr]:
- self.holdout[usr] = (ts, itm)
-
- f.write("{},{},{},{}\n".format(usr, itm, scr, ts))
-
- movielens.download = mock_download
- movielens.NUM_RATINGS[DATASET] = NUM_PTS
- movielens.DATASET_TO_NUM_USERS_AND_ITEMS[DATASET] = (NUM_USERS, NUM_ITEMS)
-
- def make_params(self, train_epochs=1):
- return {
- "train_epochs": train_epochs,
- "batches_per_step": 1,
- "use_seed": False,
- "batch_size": BATCH_SIZE,
- "eval_batch_size": EVAL_BATCH_SIZE,
- "num_neg": NUM_NEG,
- "match_mlperf": True,
- "use_tpu": False,
- "use_xla_for_gpu": False,
- "stream_files": False,
- }
-
- def test_preprocessing(self):
- # For the most part the necessary checks are performed within
- # _filter_index_sort()
-
- cache_path = os.path.join(self.temp_data_dir, "test_cache.pickle")
- data, valid_cache = data_preprocessing._filter_index_sort(
- self.rating_file, cache_path=cache_path)
-
- assert len(data[rconst.USER_MAP]) == NUM_USERS
- assert len(data[rconst.ITEM_MAP]) == NUM_ITEMS
-
- def drain_dataset(self, dataset, g):
- # type: (tf.data.Dataset, tf.Graph) -> list
- with self.session(graph=g) as sess:
- with g.as_default():
- batch = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
- output = []
- while True:
- try:
- output.append(sess.run(batch))
- except tf.errors.OutOfRangeError:
- break
- return output
-
- def _test_end_to_end(self, constructor_type):
- params = self.make_params(train_epochs=1)
- _, _, producer = data_preprocessing.instantiate_pipeline(
- dataset=DATASET, data_dir=self.temp_data_dir, params=params,
- constructor_type=constructor_type, deterministic=True)
-
- producer.start()
- producer.join()
- assert producer._fatal_exception is None
-
- user_inv_map = {v: k for k, v in producer.user_map.items()}
- item_inv_map = {v: k for k, v in producer.item_map.items()}
-
- # ==========================================================================
- # == Training Data =========================================================
- # ==========================================================================
- g = tf.Graph()
- with g.as_default():
- input_fn = producer.make_input_fn(is_training=True)
- dataset = input_fn(params)
-
- first_epoch = self.drain_dataset(dataset=dataset, g=g)
-
- counts = defaultdict(int)
- train_examples = {
- True: set(),
- False: set(),
- }
-
- md5 = hashlib.md5()
- for features, labels in first_epoch:
- data_list = [
- features[movielens.USER_COLUMN].flatten(),
- features[movielens.ITEM_COLUMN].flatten(),
- features[rconst.VALID_POINT_MASK].flatten(),
- labels.flatten()
- ]
- for i in data_list:
- md5.update(i.tobytes())
-
- for u, i, v, l in zip(*data_list):
- if not v:
- continue # ignore padding
-
- u_raw = user_inv_map[u]
- i_raw = item_inv_map[i]
- if ((u_raw, i_raw) in self.seen_pairs) != l:
- # The evaluation item is not considered during false negative
- # generation, so it will occasionally appear as a negative example
- # during training.
- assert not l
- self.assertEqual(i_raw, self.holdout[u_raw][1])
- train_examples[l].add((u_raw, i_raw))
- counts[(u_raw, i_raw)] += 1
-
- self.assertRegexpMatches(md5.hexdigest(), END_TO_END_TRAIN_MD5)
-
- num_positives_seen = len(train_examples[True])
- self.assertEqual(producer._train_pos_users.shape[0], num_positives_seen)
-
- # This check is more heuristic because negatives are sampled with
- # replacement. It only checks that negative generation is reasonably random.
- self.assertGreater(
- len(train_examples[False]) / NUM_NEG / num_positives_seen, 0.9)
-
- # This checks that the samples produced are independent by checking the
- # number of duplicate entries. If workers are not properly independent there
- # will be lots of repeated pairs.
- self.assertLess(np.mean(list(counts.values())), 1.1)
-
- # ==========================================================================
- # == Eval Data =============================================================
- # ==========================================================================
- with g.as_default():
- input_fn = producer.make_input_fn(is_training=False)
- dataset = input_fn(params)
-
- eval_data = self.drain_dataset(dataset=dataset, g=g)
-
- current_user = None
- md5 = hashlib.md5()
- for features in eval_data:
- data_list = [
- features[movielens.USER_COLUMN].flatten(),
- features[movielens.ITEM_COLUMN].flatten(),
- features[rconst.DUPLICATE_MASK].flatten()
- ]
- for i in data_list:
- md5.update(i.tobytes())
-
- for idx, (u, i, d) in enumerate(zip(*data_list)):
- u_raw = user_inv_map[u]
- i_raw = item_inv_map[i]
- if current_user is None:
- current_user = u
-
- # Ensure that users appear in blocks, as the evaluation logic expects
- # this structure.
- self.assertEqual(u, current_user)
-
- # The structure of evaluation data is 999 negative examples followed
- # by the holdout positive.
- if not (idx + 1) % (rconst.NUM_EVAL_NEGATIVES + 1):
- # Check that the last element in each chunk is the holdout item.
- self.assertEqual(i_raw, self.holdout[u_raw][1])
- current_user = None
-
- elif i_raw == self.holdout[u_raw][1]:
- # Because the holdout item is not given to the negative generation
- # process, it can appear as a negative. In that case, it should be
- # masked out as a duplicate. (Since the true positive is placed at
- # the end and would therefore lose the tie.)
- assert d
-
- else:
- # Otherwise check that the other 999 points for a user are selected
- # from the negatives.
- assert (u_raw, i_raw) not in self.seen_pairs
-
- self.assertRegexpMatches(md5.hexdigest(), END_TO_END_EVAL_MD5)
-
- def _test_fresh_randomness(self, constructor_type):
- train_epochs = 5
- params = self.make_params(train_epochs=train_epochs)
- _, _, producer = data_preprocessing.instantiate_pipeline(
- dataset=DATASET, data_dir=self.temp_data_dir, params=params,
- constructor_type=constructor_type, deterministic=True)
-
- producer.start()
-
- results = []
- g = tf.Graph()
- with g.as_default():
- for _ in range(train_epochs):
- input_fn = producer.make_input_fn(is_training=True)
- dataset = input_fn(params)
- results.extend(self.drain_dataset(dataset=dataset, g=g))
-
- producer.join()
- assert producer._fatal_exception is None
-
- positive_counts, negative_counts = defaultdict(int), defaultdict(int)
- md5 = hashlib.md5()
- for features, labels in results:
- data_list = [
- features[movielens.USER_COLUMN].flatten(),
- features[movielens.ITEM_COLUMN].flatten(),
- features[rconst.VALID_POINT_MASK].flatten(),
- labels.flatten()
- ]
- for i in data_list:
- md5.update(i.tobytes())
-
- for u, i, v, l in zip(*data_list):
- if not v:
- continue # ignore padding
-
- if l:
- positive_counts[(u, i)] += 1
- else:
- negative_counts[(u, i)] += 1
-
- self.assertRegexpMatches(md5.hexdigest(), FRESH_RANDOMNESS_MD5)
-
- # The positive examples should appear exactly once each epoch
- self.assertAllEqual(list(positive_counts.values()),
- [train_epochs for _ in positive_counts])
-
- # The threshold for the negatives is heuristic: in general repeats are
- # expected, but they should not appear too frequently.
-
- pair_cardinality = NUM_USERS * NUM_ITEMS
- neg_pair_cardinality = pair_cardinality - len(self.seen_pairs)
-
- # Approximation of the expected number of times that a particular
- # negative will appear in a given epoch. Implicit in this calculation is the
- # treatment of all negative pairs as equally likely. In general this is not
- # necessarily reasonable; however, the generation in self.setUp() will
- # approximate this behavior sufficiently for heuristic testing.
- e_sample = len(self.seen_pairs) * NUM_NEG / neg_pair_cardinality
-
- # The frequency of occurrence of a given negative pair should follow an
- # approximately binomial distribution in the limit that the cardinality of
- # the negative pair set >> number of samples per epoch.
- approx_pdf = scipy.stats.binom.pmf(k=np.arange(train_epochs+1),
- n=train_epochs, p=e_sample)
-
- # Tally the actual observed counts.
- count_distribution = [0 for _ in range(train_epochs + 1)]
- for i in negative_counts.values():
- i = min([i, train_epochs]) # round down tail for simplicity.
- count_distribution[i] += 1
- count_distribution[0] = neg_pair_cardinality - sum(count_distribution[1:])
-
- # Check that the frequency of negative pairs is approximately binomial.
- for i in range(train_epochs + 1):
- if approx_pdf[i] < 0.05:
- continue # Variance will be high at the tails.
-
- observed_fraction = count_distribution[i] / neg_pair_cardinality
- deviation = (2 * abs(observed_fraction - approx_pdf[i]) /
- (observed_fraction + approx_pdf[i]))
-
- self.assertLess(deviation, 0.2)
-
- def test_end_to_end_materialized(self):
- self._test_end_to_end("materialized")
-
- def test_end_to_end_bisection(self):
- self._test_end_to_end("bisection")
-
- def test_fresh_randomness_materialized(self):
- self._test_fresh_randomness("materialized")
-
- def test_fresh_randomness_bisection(self):
- self._test_fresh_randomness("bisection")
-
-
-if __name__ == "__main__":
- tf.test.main()
diff --git a/spaces/Natsha/mocap-ai/README.md b/spaces/Natsha/mocap-ai/README.md
deleted file mode 100644
index 6e9094ffd8b8c3b8d102c171110b072c9457043d..0000000000000000000000000000000000000000
--- a/spaces/Natsha/mocap-ai/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Optical Motion Capture AI
-sdk: docker
-app_port: 7860
-app_file: app.py
----
-
-# mocap-ai
-Functionality to load FBX files, extract the animation, process it, and write it back to the file.
-
-# Classifier
-* Globals: file with hardcoded values like the marker names.
-* Utilities:
- * Visualizations
-* FBX Handler (see the sketch after this list):
- * Load the `.fbx` file.
- * Go through each frame in the animation frame range and check if all skeleton nodes have a keyframe there.
- * If a keyframe is missing, remove that frame number from the valid frame numbers.
- * After finding all valid frames, go through all marker translation channels and store the global transform in a `pandas` DataFrame.
- * Add the actor numbers as categorical variables.
- * Save the DataFrame to a `.csv` file.
-* Inference file loader
- * Same as training file loader, but this one should process all frames regardless of keyframe presence.
-* Data augmentation:
- * Isolate a marker set.
- * Translate and rotate (optionally scale) with boundary check.
-* Model builder:
- * Instantiate a model with various hyperparameters.
-* Training loop:
- * Train given model with callbacks.
-* Test loop:
- * Validate model on validation/test data.
-* Development script:
- * Create new model, train it and test it.
-* Deployment script:
- * Deploys the model in a Docker image on HuggingFace.
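-
-A minimal sketch of the FBX Handler's DataFrame step described above, assuming a hypothetical `iter_marker_rows()` callable that yields `(frame, actor, marker, x, y, z)` tuples for the valid frames; the actual FBX traversal is not shown and the column names are illustrative only:
-
-```python
-# Sketch only: `iter_marker_rows` is an assumed helper, not part of this repo.
-import pandas as pd
-
-
-def build_marker_dataframe(iter_marker_rows, csv_path):
-    """Collect global marker positions for each valid frame and save them to CSV."""
-    df = pd.DataFrame(
-        list(iter_marker_rows()),
-        columns=["frame", "actor", "marker", "x", "y", "z"],
-    )
-    # Store the actor number as a categorical variable, as described above.
-    df["actor"] = df["actor"].astype("category")
-    df.to_csv(csv_path, index=False)
-    return df
-```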
-
-
-## References:
-1. PointNet:
-- Research paper: Qi, Charles R., et al. "PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation." CVPR. 2017. [arXiv:1612.00593](https://arxiv.org/abs/1612.00593)
-- Official code repository (TensorFlow): https://github.com/charlesq34/pointnet
-- Official code repository (PyTorch): https://github.com/fxia22/pointnet.pytorch
-2. PointNet++:
-- Research paper: Qi, Charles R., et al. "PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space." NeurIPS. 2017. [arXiv:1706.02413](https://arxiv.org/abs/1706.02413)
-- Official code repository (TensorFlow): https://github.com/charlesq34/pointnet2
-- Official code repository (PyTorch): https://github.com/erikwijmans/Pointnet2_PyTorch
-3. DGCNN:
-- Research paper: Wang, Yue, et al. "Dynamic Graph CNN for Learning on Point Clouds." ACM Transactions on Graphics (TOG) 38.5 (2019): 1-12. [arXiv:1801.07829](https://arxiv.org/abs/1801.07829)
-- Official code repository (TensorFlow): https://github.com/WangYueFt/dgcnn
-- Official code repository (PyTorch): https://github.com/muhanzhang/DGCNN
\ No newline at end of file
diff --git a/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts b/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts
deleted file mode 100644
index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/src/lib/hooks/chat-history.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-import { zip } from 'lodash-es'
-import { ChatMessageModel, BotId } from '@/lib/bots/bing/types'
-import { Storage } from '../storage'
-
-/**
- * conversations:$botId => Conversation[]
- * conversation:$botId:$cid:messages => ChatMessageModel[]
- */
-
-interface Conversation {
- id: string
- createdAt: number
-}
-
-type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] }
-
-async function loadHistoryConversations(botId: BotId): Promise<Conversation[]> {
- const key = `conversations:${botId}`
- const { [key]: value } = await Storage.get(key)
- return value || []
-}
-
-async function deleteHistoryConversation(botId: BotId, cid: string) {
- const conversations = await loadHistoryConversations(botId)
- const newConversations = conversations.filter((c) => c.id !== cid)
- await Storage.set({ [`conversations:${botId}`]: newConversations })
-}
-
-async function loadConversationMessages(botId: BotId, cid: string): Promise<ChatMessageModel[]> {
- const key = `conversation:${botId}:${cid}:messages`
- const { [key]: value } = await Storage.get(key)
- return value || []
-}
-
-export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) {
- const conversations = await loadHistoryConversations(botId)
- if (!conversations.some((c) => c.id === cid)) {
- conversations.unshift({ id: cid, createdAt: Date.now() })
- await Storage.set({ [`conversations:${botId}`]: conversations })
- }
- const key = `conversation:${botId}:${cid}:messages`
- await Storage.set({ [key]: messages })
-}
-
-export async function loadHistoryMessages(botId: BotId): Promise<ConversationWithMessages[]> {
- const conversations = await loadHistoryConversations(botId)
- const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id)))
- return zip(conversations, messagesList).map(([c, messages]) => ({
- id: c!.id,
- createdAt: c!.createdAt,
- messages: messages!,
- }))
-}
-
-export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) {
- const messages = await loadConversationMessages(botId, conversationId)
- const newMessages = messages.filter((m) => m.id !== messageId)
- await setConversationMessages(botId, conversationId, newMessages)
- if (!newMessages.length) {
- await deleteHistoryConversation(botId, conversationId)
- }
-}
diff --git a/spaces/NimaBoscarino/climategan/climategan/depth.py b/spaces/NimaBoscarino/climategan/climategan/depth.py
deleted file mode 100644
index d8d408448b82b1d11043131b61897b8467192e65..0000000000000000000000000000000000000000
--- a/spaces/NimaBoscarino/climategan/climategan/depth.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from climategan.blocks import BaseDecoder, Conv2dBlock, InterpolateNearest2d
-from climategan.utils import find_target_size
-
-
-def create_depth_decoder(opts, no_init=False, verbose=0):
- if opts.gen.d.architecture == "base":
- decoder = BaseDepthDecoder(opts)
- if "s" in opts.tasks:
- assert opts.gen.s.use_dada is False
- if "m" in opts.tasks:
- assert opts.gen.m.use_dada is False
- else:
- decoder = DADADepthDecoder(opts)
-
- if verbose > 0:
- print(f" - Add {decoder.__class__.__name__}")
-
- return decoder
-
-
-class DADADepthDecoder(nn.Module):
- """
- Depth decoder based on depth auxiliary task in DADA paper
- """
-
- def __init__(self, opts):
- super().__init__()
- if (
- opts.gen.encoder.architecture == "deeplabv3"
- and opts.gen.deeplabv3.backbone == "mobilenet"
- ):
- res_dim = 320
- else:
- res_dim = 2048
-
- mid_dim = 512
-
- self.do_feat_fusion = False
- if opts.gen.m.use_dada or ("s" in opts.tasks and opts.gen.s.use_dada):
- self.do_feat_fusion = True
- self.dec4 = Conv2dBlock(
- 128,
- res_dim,
- 1,
- stride=1,
- padding=0,
- bias=True,
- activation="lrelu",
- norm="none",
- )
-
- self.relu = nn.ReLU(inplace=True)
- self.enc4_1 = Conv2dBlock(
- res_dim,
- mid_dim,
- 1,
- stride=1,
- padding=0,
- bias=False,
- activation="lrelu",
- pad_type="reflect",
- norm="batch",
- )
- self.enc4_2 = Conv2dBlock(
- mid_dim,
- mid_dim,
- 3,
- stride=1,
- padding=1,
- bias=False,
- activation="lrelu",
- pad_type="reflect",
- norm="batch",
- )
- self.enc4_3 = Conv2dBlock(
- mid_dim,
- 128,
- 1,
- stride=1,
- padding=0,
- bias=False,
- activation="lrelu",
- pad_type="reflect",
- norm="batch",
- )
- self.upsample = None
- if opts.gen.d.upsample_featuremaps:
- self.upsample = nn.Sequential(
- *[
- InterpolateNearest2d(),
- Conv2dBlock(
- 128,
- 32,
- 3,
- stride=1,
- padding=1,
- bias=False,
- activation="lrelu",
- pad_type="reflect",
- norm="batch",
- ),
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
- ]
- )
- self._target_size = find_target_size(opts, "d")
- print(
- " - {}: setting target size to {}".format(
- self.__class__.__name__, self._target_size
- )
- )
-
- def set_target_size(self, size):
- """
- Set final interpolation's target size
-
- Args:
- size (int, list, tuple): target size (h, w). If int, target will be (i, i)
- """
- if isinstance(size, (list, tuple)):
- self._target_size = size[:2]
- else:
- self._target_size = (size, size)
-
- def forward(self, z):
- if isinstance(z, (list, tuple)):
- z = z[0]
- z4_enc = self.enc4_1(z)
- z4_enc = self.enc4_2(z4_enc)
- z4_enc = self.enc4_3(z4_enc)
-
- z_depth = None
- if self.do_feat_fusion:
- z_depth = self.dec4(z4_enc)
-
- if self.upsample is not None:
- z4_enc = self.upsample(z4_enc)
-
- depth = torch.mean(z4_enc, dim=1, keepdim=True) # DADA paper decoder
- if depth.shape[-1] != self._target_size:
- depth = F.interpolate(
- depth,
- size=(384, 384), # size used in MiDaS inference
- mode="bicubic", # what MiDaS uses
- align_corners=False,
- )
-
- depth = F.interpolate(
- depth, (self._target_size, self._target_size), mode="nearest"
- ) # what we used in the transforms to resize input
-
- return depth, z_depth
-
- def __str__(self):
- return "DADA Depth Decoder"
-
-
-class BaseDepthDecoder(BaseDecoder):
- def __init__(self, opts):
- low_level_feats_dim = -1
- use_v3 = opts.gen.encoder.architecture == "deeplabv3"
- use_mobile_net = opts.gen.deeplabv3.backbone == "mobilenet"
- use_low = opts.gen.d.use_low_level_feats
-
- if use_v3 and use_mobile_net:
- input_dim = 320
- if use_low:
- low_level_feats_dim = 24
- elif use_v3:
- input_dim = 2048
- if use_low:
- low_level_feats_dim = 256
- else:
- input_dim = 2048
-
- n_upsample = 1 if opts.gen.d.upsample_featuremaps else 0
- output_dim = (
- 1
- if not opts.gen.d.classify.enable
- else opts.gen.d.classify.linspace.buckets
- )
-
- self._target_size = find_target_size(opts, "d")
- print(
- " - {}: setting target size to {}".format(
- self.__class__.__name__, self._target_size
- )
- )
-
- super().__init__(
- n_upsample=n_upsample,
- n_res=opts.gen.d.n_res,
- input_dim=input_dim,
- proj_dim=opts.gen.d.proj_dim,
- output_dim=output_dim,
- norm=opts.gen.d.norm,
- activ=opts.gen.d.activ,
- pad_type=opts.gen.d.pad_type,
- output_activ="none",
- low_level_feats_dim=low_level_feats_dim,
- )
-
- def set_target_size(self, size):
- """
- Set final interpolation's target size
-
- Args:
- size (int, list, tuple): target size (h, w). If int, target will be (i, i)
- """
- if isinstance(size, (list, tuple)):
- self._target_size = size[:2]
- else:
- self._target_size = (size, size)
-
- def forward(self, z, cond=None):
- if self._target_size is None:
- error = "self._target_size should be set with self.set_target_size() "
- error += "to interpolate depth to the target depth map's size"
- raise ValueError(error)
-
- d = super().forward(z)
-
- preds = F.interpolate(
- d, size=self._target_size, mode="bilinear", align_corners=True
- )
-
- return preds, None
diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py b/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py
deleted file mode 100644
index ccd810559201624bc6c20ea9b60009b927ecadd6..0000000000000000000000000000000000000000
--- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/RealESRGANv030/realesrgan/archs/discriminator_arch.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from basicsr.utils.registry import ARCH_REGISTRY
-from torch import nn as nn
-from torch.nn import functional as F
-from torch.nn.utils import spectral_norm
-
-
-@ARCH_REGISTRY.register()
-class UNetDiscriminatorSN(nn.Module):
- """Defines a U-Net discriminator with spectral normalization (SN)
-
- It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
-
- Args:
- num_in_ch (int): Channel number of inputs. Default: 3.
- num_feat (int): Channel number of base intermediate features. Default: 64.
- skip_connection (bool): Whether to use skip connections in the U-Net. Default: True.
- """
-
- def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
- super(UNetDiscriminatorSN, self).__init__()
- self.skip_connection = skip_connection
- norm = spectral_norm
- # the first convolution
- self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
- # downsample
- self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
- self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
- self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
- # upsample
- self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
- self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
- self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
- # extra convolutions
- self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
- self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
- self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
-
- def forward(self, x):
- # downsample
- x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
- x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
- x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
- x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
-
- # upsample
- x3 = F.interpolate(x3, scale_factor=2, mode="bilinear", align_corners=False)
- x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
-
- if self.skip_connection:
- x4 = x4 + x2
- x4 = F.interpolate(x4, scale_factor=2, mode="bilinear", align_corners=False)
- x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
-
- if self.skip_connection:
- x5 = x5 + x1
- x5 = F.interpolate(x5, scale_factor=2, mode="bilinear", align_corners=False)
- x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
-
- if self.skip_connection:
- x6 = x6 + x0
-
- # extra convolutions
- out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
- out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
- out = self.conv9(out)
-
- return out
diff --git a/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py b/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py
deleted file mode 100644
index 241dc0754fae7d88dbbd9a02e665ca30a73c7422..0000000000000000000000000000000000000000
--- a/spaces/OAOA/DifFace/basicsr/ops/fused_act/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-
-__all__ = ['FusedLeakyReLU', 'fused_leaky_relu']
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md
deleted file mode 100644
index 7a76ffd57c066c20af94aa3fca24c18e2ba4c3dd..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Generative Spoken Language Modeling
-
-* [Paper](https://arxiv.org/abs/2102.01192)
-* [Demo](https://speechbot.github.io/gslm/index.html)
-
-We build and evaluate generative speech2speech systems using [Log Mel Filterbank](https://pytorch.org/audio/stable/compliance.kaldi.html#fbank), [Modified CPC](https://github.com/facebookresearch/CPC_audio), [HuBERT Base](https://github.com/pytorch/fairseq/tree/main/examples/hubert) and [Wav2Vec 2.0 Large](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec). Our system is composed of three components: *speech2unit*, *ulm* and *unit2speech*. We explain the models and usage of these components in their respective sub-directories. See the links below.
-
-## Speech to Unit Model (speech2unit)
-Speech to unit model is used for quantizing raw speech into learned discrete speech units. [More details](speech2unit)
-
-## Unit Language Model (ulm)
-Unit Language Model is a generative language model trained on discrete speech units. [More details](ulm)
-
-## Unit to Speech Model (unit2speech)
-Unit to speech model is used for synthesizing speech from discrete speech units. [More details](unit2speech)
-
-## Metrics
-We show how to compute ASR based metrics as well as zero-shot metrics proposed in our paper [here](metrics).
-
-## Tools
-We share two tools: one to resynthesize a given spoken utterance, and one to generate novel spoken language given a spoken prompt. [More details](tools)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py
deleted file mode 100644
index 70d0016663b7d0b90033f4eb301b527f2c92a3f8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-import sys
-
-import soundfile as sf
-import torch
-import torchaudio
-
-from feature_utils import get_path_iterator, dump_feature
-
-logging.basicConfig(
- format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S",
- level=os.environ.get("LOGLEVEL", "INFO").upper(),
- stream=sys.stdout,
-)
-logger = logging.getLogger("dump_mfcc_feature")
-
-
-class MfccFeatureReader(object):
- def __init__(self, sample_rate):
- self.sample_rate = sample_rate
-
- def read_audio(self, path, ref_len=None):
- wav, sr = sf.read(path)
- assert sr == self.sample_rate, sr
- if wav.ndim == 2:
- wav = wav.mean(-1)
- assert wav.ndim == 1, wav.ndim
- if ref_len is not None and abs(ref_len - len(wav)) > 160:
- logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
- return wav
-
- def get_feats(self, path, ref_len=None):
- x = self.read_audio(path, ref_len)
- with torch.no_grad():
- x = torch.from_numpy(x).float()
- x = x.view(1, -1)
-
- mfccs = torchaudio.compliance.kaldi.mfcc(
- waveform=x,
- sample_frequency=self.sample_rate,
- use_energy=False,
- ) # (time, freq)
- mfccs = mfccs.transpose(0, 1) # (freq, time)
- deltas = torchaudio.functional.compute_deltas(mfccs)
- ddeltas = torchaudio.functional.compute_deltas(deltas)
- concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
- concat = concat.transpose(0, 1).contiguous()  # (time, freq * 3)
- return concat
-
-
-def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate):
- reader = MfccFeatureReader(sample_rate)
- generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
- dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
-
-
-
-if __name__ == "__main__":
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("tsv_dir")
- parser.add_argument("split")
- parser.add_argument("nshard", type=int)
- parser.add_argument("rank", type=int)
- parser.add_argument("feat_dir")
- parser.add_argument("--sample_rate", type=int, default=16000)
- args = parser.parse_args()
- logger.info(args)
-
- main(**vars(args))
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md
deleted file mode 100644
index 8793e225c99732c42c9c19e22075cde37c73341d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/nonautoregressive_translation/README.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# Non-autoregressive Neural Machine Translation (NAT)
-
-This page mainly includes instructions for reproducing results from the following papers
-* [Levenshtein Transformer (Gu et al., 2019)](https://arxiv.org/abs/1905.11006).
-* [Understanding Knowledge Distillation in Non-autoregressive Machine Translation (Zhou et al., 2019)](https://arxiv.org/abs/1911.02727).
-
-We also provide our own implementations of several popular non-autoregressive models for reference:
-* [Non-Autoregressive Neural Machine Translation (Gu et al., 2017)](https://arxiv.org/abs/1711.02281)
-* [Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement (Lee et al., 2018)](https://arxiv.org/abs/1802.06901)
-* [Insertion Transformer: Flexible Sequence Generation via Insertion Operations (Stern et al., 2019)](https://arxiv.org/abs/1902.03249)
-* [Mask-Predict: Parallel Decoding of Conditional Masked Language Models (Ghazvininejad et al., 2019)](https://arxiv.org/abs/1904.09324v2)
-* [Fast Structured Decoding for Sequence Models (Sun et al., 2019)](https://arxiv.org/abs/1910.11555)
-
-## Dataset
-
-First, follow the [instructions to download and preprocess the WMT'14 En-De dataset](../translation#wmt14-english-to-german-convolutional).
-Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
-
-### Knowledge Distillation
-Following [Gu et al. 2019](https://arxiv.org/abs/1905.11006), [knowledge distillation](https://arxiv.org/abs/1606.07947) from an autoregressive model can effectively simplify the training data distribution, which is sometimes essential for NAT-based models to learn good translations.
-The easiest way of performing distillation is to follow the [instructions for training a standard transformer model](../translation) on the same data, and then decode the training set to produce a distillation dataset for NAT.
-
-### Download
-We also provide the preprocessed [original](http://dl.fbaipublicfiles.com/nat/original_dataset.zip) and [distillation](http://dl.fbaipublicfiles.com/nat/distill_dataset.zip) datasets. Please build the binarized dataset on your own.
-
-
-## Train a model
-
-Then we can train a nonautoregressive model using the `translation_lev` task and a new criterion `nat_loss`.
-Use the `--noise` flag to specify the input noise used on the target sentences.
-By default, we run the task for the *Levenshtein Transformer* with `--noise='random_delete'`. Full scripts to run other models can also be found [here](./scripts.md).
-
-The following command will train a *Levenshtein Transformer* on the binarized dataset.
-
-```bash
-fairseq-train \
- data-bin/wmt14_en_de_distill \
- --save-dir checkpoints \
- --ddp-backend=legacy_ddp \
- --task translation_lev \
- --criterion nat_loss \
- --arch levenshtein_transformer \
- --noise random_delete \
- --share-all-embeddings \
- --optimizer adam --adam-betas '(0.9,0.98)' \
- --lr 0.0005 --lr-scheduler inverse_sqrt \
- --stop-min-lr '1e-09' --warmup-updates 10000 \
- --warmup-init-lr '1e-07' --label-smoothing 0.1 \
- --dropout 0.3 --weight-decay 0.01 \
- --decoder-learned-pos \
- --encoder-learned-pos \
- --apply-bert-init \
- --log-format 'simple' --log-interval 100 \
- --fixed-validation-seed 7 \
- --max-tokens 8000 \
- --save-interval-updates 10000 \
- --max-update 300000
-```
-
-## Translate
-
-Once a model is trained, we can generate translations using an `iterative_refinement_generator`, which starts from the model's initial output and iteratively reads and greedily refines the translation until (1) the model predicts the same translation for two consecutive iterations, or (2) the generator reaches the maximum number of iterations (`--iter-decode-max-iter`). Use `--print-step` to check the actual number of iterations for each sentence.
-
-For the *Levenshtein Transformer*, it sometimes helps to apply an `--iter-decode-eos-penalty` (typically 0~3) to penalize the model for finishing generation too early and producing translations that are too short.
-
-For example, to generate with `--iter-decode-max-iter=9`:
-```bash
-fairseq-generate \
- data-bin/wmt14_en_de_distill \
- --gen-subset test \
- --task translation_lev \
- --path checkpoints/checkpoint_best.pt \
- --iter-decode-max-iter 9 \
- --iter-decode-eos-penalty 0 \
- --beam 1 --remove-bpe \
- --print-step \
- --batch-size 400
-```
-At the end of generation, we can see the tokenized BLEU score for the translation.
-
-## Advanced Decoding Methods
-### Ensemble
-The NAT models use special implementations of [ensembling](https://github.com/fairinternal/fairseq-py/blob/b98d88da52f2f21f1b169bab8c70c1c4ca19a768/fairseq/sequence_generator.py#L522) to support iterative refinement and a variety of parallel operations in different models, while sharing the same API as standard autoregressive models:
-```bash
-fairseq-generate \
- data-bin/wmt14_en_de_distill \
- --gen-subset test \
- --task translation_lev \
- --path checkpoint_1.pt:checkpoint_2.pt:checkpoint_3.pt \
- --iter-decode-max-iter 9 \
- --iter-decode-eos-penalty 0 \
- --beam 1 --remove-bpe \
- --print-step \
- --batch-size 400
-```
-We use ``:`` to separate multiple models. Note that not all NAT models support ensembling for now.
-
-
-### Length-beam
-For models that predict lengths before decoding (e.g. the vanilla NAT, Mask-Predict, etc.), it is possible to improve translation quality by varying the target length around the predicted value and translating the same example multiple times in parallel. We can then select the best translation as the one with the highest score defined by the model's output.
-
-Note that not all models support length beams. For models that change lengths dynamically (e.g. the *Insertion Transformer* and *Levenshtein Transformer*), the same trick does not apply.
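-
-As a schematic illustration of the length-beam idea (not fairseq's actual API), one can decode the same example once per candidate length and keep the highest-scoring output. In the sketch below, `decode_fn` is an assumed callback that decodes with a fixed target length and returns a `(translation, score)` pair:
-
-```python
-def translate_with_length_beam(decode_fn, predicted_length, beam=9):
-    """Try `beam` target lengths centered on the prediction and keep the best one."""
-    best_translation, best_score = None, float("-inf")
-    for offset in range(-(beam // 2), beam // 2 + 1):
-        length = max(1, predicted_length + offset)
-        translation, score = decode_fn(length)  # run in parallel in practice
-        if score > best_score:
-            best_translation, best_score = translation, score
-    return best_translation
-```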
-
-### Re-ranking
-If the model generates multiple translations with a length beam, we can also introduce an autoregressive model to rerank the translations, since scoring with an autoregressive model is much faster than decoding with it.
-
-For example, to generate translations with length beam and reranking,
-```bash
-fairseq-generate \
- data-bin/wmt14_en_de_distill \
- --gen-subset test \
- --task translation_lev \
- --path checkpoints/checkpoint_best.pt:at_checkpoints/checkpoint_best.pt \
- --iter-decode-max-iter 9 \
- --iter-decode-eos-penalty 0 \
- --iter-decode-with-beam 9 \
- --iter-decode-with-external-reranker \
- --beam 1 --remove-bpe \
- --print-step \
- --batch-size 100
-```
-Note that we need to make sure the autoregressive model shares the same vocabulary as our target non-autoregressive model.
-
-
-## Citation
-
-```bibtex
-@incollection{NIPS2019_9297,
- title = {Levenshtein Transformer},
- author = {Gu, Jiatao and Wang, Changhan and Zhao, Junbo},
- booktitle = {Advances in Neural Information Processing Systems 32},
- editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
- pages = {11179--11189},
- year = {2019},
- publisher = {Curran Associates, Inc.},
- url = {http://papers.nips.cc/paper/9297-levenshtein-transformer.pdf}
-}
-```
-```bibtex
-@article{zhou2019understanding,
- title={Understanding Knowledge Distillation in Non-autoregressive Machine Translation},
- author={Zhou, Chunting and Neubig, Graham and Gu, Jiatao},
- journal={arXiv preprint arXiv:1911.02727},
- year={2019}
-}
-```
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py
deleted file mode 100644
index e1e66a7d50fa1b1b313e9d1a6e7862ac9bfaa074..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/lstm.py
+++ /dev/null
@@ -1,753 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Dict, List, Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.models import (
- FairseqEncoder,
- FairseqEncoderDecoderModel,
- FairseqIncrementalDecoder,
- register_model,
- register_model_architecture,
-)
-from fairseq.modules import AdaptiveSoftmax, FairseqDropout
-from torch import Tensor
-
-
-DEFAULT_MAX_SOURCE_POSITIONS = 1e5
-DEFAULT_MAX_TARGET_POSITIONS = 1e5
-
-
-@register_model("lstm")
-class LSTMModel(FairseqEncoderDecoderModel):
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--dropout', type=float, metavar='D',
- help='dropout probability')
- parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained encoder embedding')
- parser.add_argument('--encoder-freeze-embed', action='store_true',
- help='freeze encoder embeddings')
- parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
- help='encoder hidden size')
- parser.add_argument('--encoder-layers', type=int, metavar='N',
- help='number of encoder layers')
- parser.add_argument('--encoder-bidirectional', action='store_true',
- help='make all layers of encoder bidirectional')
- parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained decoder embedding')
- parser.add_argument('--decoder-freeze-embed', action='store_true',
- help='freeze decoder embeddings')
- parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
- help='decoder hidden size')
- parser.add_argument('--decoder-layers', type=int, metavar='N',
- help='number of decoder layers')
- parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
- help='decoder output embedding dimension')
- parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
- help='decoder attention')
- parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
- help='comma separated list of adaptive softmax cutoff points. '
- 'Must be used with adaptive_loss criterion')
- parser.add_argument('--share-decoder-input-output-embed', default=False,
- action='store_true',
- help='share decoder input and output embeddings')
- parser.add_argument('--share-all-embeddings', default=False, action='store_true',
- help='share encoder, decoder and output embeddings'
- ' (requires shared dictionary and embed dim)')
-
- # Granular dropout settings (if not specified these default to --dropout)
- parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
- help='dropout probability for encoder input embedding')
- parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
- help='dropout probability for encoder output')
- parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
- help='dropout probability for decoder input embedding')
- parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
- help='dropout probability for decoder output')
- # fmt: on
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
- # make sure that all args are properly defaulted (in case there are any new ones)
- base_architecture(args)
-
- if args.encoder_layers != args.decoder_layers:
- raise ValueError("--encoder-layers must match --decoder-layers")
-
- max_source_positions = getattr(
- args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
- )
- max_target_positions = getattr(
- args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS
- )
-
- def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- embed_dict = utils.parse_embedding(embed_path)
- utils.print_embed_overlap(embed_dict, dictionary)
- return utils.load_embedding(embed_dict, dictionary, embed_tokens)
-
- if args.encoder_embed_path:
- pretrained_encoder_embed = load_pretrained_embedding_from_file(
- args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
- )
- else:
- num_embeddings = len(task.source_dictionary)
- pretrained_encoder_embed = Embedding(
- num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
- )
-
- if args.share_all_embeddings:
- # double check all parameters combinations are valid
- if task.source_dictionary != task.target_dictionary:
- raise ValueError("--share-all-embeddings requires a joint dictionary")
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to "
- "match --decoder-embed-dim"
- )
- pretrained_decoder_embed = pretrained_encoder_embed
- args.share_decoder_input_output_embed = True
- else:
- # separate decoder input embeddings
- pretrained_decoder_embed = None
- if args.decoder_embed_path:
- pretrained_decoder_embed = load_pretrained_embedding_from_file(
- args.decoder_embed_path,
- task.target_dictionary,
- args.decoder_embed_dim,
- )
- # one last double check of parameter combinations
- if args.share_decoder_input_output_embed and (
- args.decoder_embed_dim != args.decoder_out_embed_dim
- ):
- raise ValueError(
- "--share-decoder-input-output-embed requires "
- "--decoder-embed-dim to match --decoder-out-embed-dim"
- )
-
- if args.encoder_freeze_embed:
- pretrained_encoder_embed.weight.requires_grad = False
- if args.decoder_freeze_embed:
- pretrained_decoder_embed.weight.requires_grad = False
-
- encoder = LSTMEncoder(
- dictionary=task.source_dictionary,
- embed_dim=args.encoder_embed_dim,
- hidden_size=args.encoder_hidden_size,
- num_layers=args.encoder_layers,
- dropout_in=args.encoder_dropout_in,
- dropout_out=args.encoder_dropout_out,
- bidirectional=args.encoder_bidirectional,
- pretrained_embed=pretrained_encoder_embed,
- max_source_positions=max_source_positions,
- )
- decoder = LSTMDecoder(
- dictionary=task.target_dictionary,
- embed_dim=args.decoder_embed_dim,
- hidden_size=args.decoder_hidden_size,
- out_embed_dim=args.decoder_out_embed_dim,
- num_layers=args.decoder_layers,
- dropout_in=args.decoder_dropout_in,
- dropout_out=args.decoder_dropout_out,
- attention=utils.eval_bool(args.decoder_attention),
- encoder_output_units=encoder.output_units,
- pretrained_embed=pretrained_decoder_embed,
- share_input_output_embed=args.share_decoder_input_output_embed,
- adaptive_softmax_cutoff=(
- utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
- if args.criterion == "adaptive_loss"
- else None
- ),
- max_target_positions=max_target_positions,
- residuals=False,
- )
- return cls(encoder, decoder)
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- ):
- encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)
- decoder_out = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- )
- return decoder_out
-
-
-class LSTMEncoder(FairseqEncoder):
- """LSTM encoder."""
-
- def __init__(
- self,
- dictionary,
- embed_dim=512,
- hidden_size=512,
- num_layers=1,
- dropout_in=0.1,
- dropout_out=0.1,
- bidirectional=False,
- left_pad=True,
- pretrained_embed=None,
- padding_idx=None,
- max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS,
- ):
- super().__init__(dictionary)
- self.num_layers = num_layers
- self.dropout_in_module = FairseqDropout(
- dropout_in*1.0, module_name=self.__class__.__name__
- )
- self.dropout_out_module = FairseqDropout(
- dropout_out*1.0, module_name=self.__class__.__name__
- )
- self.bidirectional = bidirectional
- self.hidden_size = hidden_size
- self.max_source_positions = max_source_positions
-
- num_embeddings = len(dictionary)
- self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad()
- if pretrained_embed is None:
- self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
- else:
- self.embed_tokens = pretrained_embed
-
- self.lstm = LSTM(
- input_size=embed_dim,
- hidden_size=hidden_size,
- num_layers=num_layers,
- dropout=self.dropout_out_module.p if num_layers > 1 else 0.0,
- bidirectional=bidirectional,
- )
- self.left_pad = left_pad
-
- self.output_units = hidden_size
- if bidirectional:
- self.output_units *= 2
-
- def forward(
- self,
- src_tokens: Tensor,
- src_lengths: Tensor,
- enforce_sorted: bool = True,
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of
- shape `(batch, src_len)`
- src_lengths (LongTensor): lengths of each source sentence of
- shape `(batch)`
- enforce_sorted (bool, optional): if True, `src_tokens` is
- expected to contain sequences sorted by length in a
- decreasing order. If False, this condition is not
- required. Default: True.
- """
- if self.left_pad:
- # nn.utils.rnn.pack_padded_sequence requires right-padding;
- # convert left-padding to right-padding
- src_tokens = utils.convert_padding_direction(
- src_tokens,
- torch.zeros_like(src_tokens).fill_(self.padding_idx),
- left_to_right=True,
- )
-
- bsz, seqlen = src_tokens.size()
-
- # embed tokens
- x = self.embed_tokens(src_tokens)
- x = self.dropout_in_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # pack embedded source tokens into a PackedSequence
- packed_x = nn.utils.rnn.pack_padded_sequence(
- x, src_lengths.cpu(), enforce_sorted=enforce_sorted
- )
-
- # apply LSTM
- if self.bidirectional:
- state_size = 2 * self.num_layers, bsz, self.hidden_size
- else:
- state_size = self.num_layers, bsz, self.hidden_size
- h0 = x.new_zeros(*state_size)
- c0 = x.new_zeros(*state_size)
- packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
-
- # unpack outputs and apply dropout
- x, _ = nn.utils.rnn.pad_packed_sequence(
- packed_outs, padding_value=self.padding_idx * 1.0
- )
- x = self.dropout_out_module(x)
- assert list(x.size()) == [seqlen, bsz, self.output_units]
-
- if self.bidirectional:
- final_hiddens = self.combine_bidir(final_hiddens, bsz)
- final_cells = self.combine_bidir(final_cells, bsz)
-
- encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
-
- return tuple(
- (
- x, # seq_len x batch x hidden
- final_hiddens, # num_layers x batch x num_directions*hidden
- final_cells, # num_layers x batch x num_directions*hidden
- encoder_padding_mask, # seq_len x batch
- )
- )
-
- def combine_bidir(self, outs, bsz: int):
- out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()
- return out.view(self.num_layers, bsz, -1)
-
- def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order):
- return tuple(
- (
- encoder_out[0].index_select(1, new_order),
- encoder_out[1].index_select(1, new_order),
- encoder_out[2].index_select(1, new_order),
- encoder_out[3].index_select(1, new_order),
- )
- )
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return self.max_source_positions
-
-
-class AttentionLayer(nn.Module):
- def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False):
- super().__init__()
-
- self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
- self.output_proj = Linear(
- input_embed_dim + source_embed_dim, output_embed_dim, bias=bias
- )
-
- def forward(self, input, source_hids, encoder_padding_mask):
- # input: bsz x input_embed_dim
- # source_hids: srclen x bsz x source_embed_dim
-
- # x: bsz x source_embed_dim
- x = self.input_proj(input)
-
- # compute attention
- attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
-
- # don't attend over padding
- if encoder_padding_mask is not None:
- attn_scores = (
- attn_scores.float()
- .masked_fill_(encoder_padding_mask, float("-inf"))
- .type_as(attn_scores)
- ) # FP16 support: cast to float and back
-
- attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
-
- # sum weighted sources
- x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
-
- x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))
- return x, attn_scores
-
-
-class LSTMDecoder(FairseqIncrementalDecoder):
- """LSTM decoder."""
-
- def __init__(
- self,
- dictionary,
- embed_dim=512,
- hidden_size=512,
- out_embed_dim=512,
- num_layers=1,
- dropout_in=0.1,
- dropout_out=0.1,
- attention=True,
- encoder_output_units=512,
- pretrained_embed=None,
- share_input_output_embed=False,
- adaptive_softmax_cutoff=None,
- max_target_positions=DEFAULT_MAX_TARGET_POSITIONS,
- residuals=False,
- ):
- super().__init__(dictionary)
- self.dropout_in_module = FairseqDropout(
- dropout_in*1.0, module_name=self.__class__.__name__
- )
- self.dropout_out_module = FairseqDropout(
- dropout_out*1.0, module_name=self.__class__.__name__
- )
- self.hidden_size = hidden_size
- self.share_input_output_embed = share_input_output_embed
- self.need_attn = True
- self.max_target_positions = max_target_positions
- self.residuals = residuals
- self.num_layers = num_layers
-
- self.adaptive_softmax = None
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- if pretrained_embed is None:
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- else:
- self.embed_tokens = pretrained_embed
-
- self.encoder_output_units = encoder_output_units
- if encoder_output_units != hidden_size and encoder_output_units != 0:
- self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)
- self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)
- else:
- self.encoder_hidden_proj = self.encoder_cell_proj = None
-
- # disable input feeding if there is no encoder
- # input feeding is described in arxiv.org/abs/1508.04025
- input_feed_size = 0 if encoder_output_units == 0 else hidden_size
- self.layers = nn.ModuleList(
- [
- LSTMCell(
- input_size=input_feed_size + embed_dim
- if layer == 0
- else hidden_size,
- hidden_size=hidden_size,
- )
- for layer in range(num_layers)
- ]
- )
-
- if attention:
- # TODO make bias configurable
- self.attention = AttentionLayer(
- hidden_size, encoder_output_units, hidden_size, bias=False
- )
- else:
- self.attention = None
-
- if hidden_size != out_embed_dim:
- self.additional_fc = Linear(hidden_size, out_embed_dim)
-
- if adaptive_softmax_cutoff is not None:
- # setting adaptive_softmax dropout to dropout_out for now but can be redefined
- self.adaptive_softmax = AdaptiveSoftmax(
- num_embeddings,
- hidden_size,
- adaptive_softmax_cutoff,
- dropout=dropout_out,
- )
- elif not self.share_input_output_embed:
- self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
-
- def forward(
- self,
- prev_output_tokens,
- encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- src_lengths: Optional[Tensor] = None,
- ):
- x, attn_scores = self.extract_features(
- prev_output_tokens, encoder_out, incremental_state
- )
- return self.output_layer(x), attn_scores
-
- def extract_features(
- self,
- prev_output_tokens,
- encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- ):
- """
- Similar to *forward* but only return features.
- """
- # get outputs from encoder
- if encoder_out is not None:
- encoder_outs = encoder_out[0]
- encoder_hiddens = encoder_out[1]
- encoder_cells = encoder_out[2]
- encoder_padding_mask = encoder_out[3]
- else:
- encoder_outs = torch.empty(0)
- encoder_hiddens = torch.empty(0)
- encoder_cells = torch.empty(0)
- encoder_padding_mask = torch.empty(0)
- srclen = encoder_outs.size(0)
-
- if incremental_state is not None and len(incremental_state) > 0:
- prev_output_tokens = prev_output_tokens[:, -1:]
-
- bsz, seqlen = prev_output_tokens.size()
-
- # embed tokens
- x = self.embed_tokens(prev_output_tokens)
- x = self.dropout_in_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # initialize previous states (or get from cache during incremental generation)
- if incremental_state is not None and len(incremental_state) > 0:
- prev_hiddens, prev_cells, input_feed = self.get_cached_state(
- incremental_state
- )
- elif encoder_out is not None:
- # setup recurrent cells
- prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)]
- prev_cells = [encoder_cells[i] for i in range(self.num_layers)]
- if self.encoder_hidden_proj is not None:
- prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens]
- prev_cells = [self.encoder_cell_proj(y) for y in prev_cells]
- input_feed = x.new_zeros(bsz, self.hidden_size)
- else:
- # setup zero cells, since there is no encoder
- zero_state = x.new_zeros(bsz, self.hidden_size)
- prev_hiddens = [zero_state for i in range(self.num_layers)]
- prev_cells = [zero_state for i in range(self.num_layers)]
- input_feed = None
-
- assert (
- srclen > 0 or self.attention is None
- ), "attention is not supported if there are no encoder outputs"
- attn_scores: Optional[Tensor] = (
- x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None
- )
- outs = []
- for j in range(seqlen):
- # input feeding: concatenate context vector from previous time step
- if input_feed is not None:
- input = torch.cat((x[j, :, :], input_feed), dim=1)
- else:
- input = x[j]
-
- for i, rnn in enumerate(self.layers):
- # recurrent cell
- hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
-
- # hidden state becomes the input to the next layer
- input = self.dropout_out_module(hidden)
- if self.residuals:
- input = input + prev_hiddens[i]
-
- # save state for next time step
- prev_hiddens[i] = hidden
- prev_cells[i] = cell
-
- # apply attention using the last layer's hidden state
- if self.attention is not None:
- assert attn_scores is not None
- out, attn_scores[:, j, :] = self.attention(
- hidden, encoder_outs, encoder_padding_mask
- )
- else:
- out = hidden
- out = self.dropout_out_module(out)
-
- # input feeding
- if input_feed is not None:
- input_feed = out
-
- # save final output
- outs.append(out)
-
- # Stack all the necessary tensors together and store
- prev_hiddens_tensor = torch.stack(prev_hiddens)
- prev_cells_tensor = torch.stack(prev_cells)
- cache_state = torch.jit.annotate(
- Dict[str, Optional[Tensor]],
- {
- "prev_hiddens": prev_hiddens_tensor,
- "prev_cells": prev_cells_tensor,
- "input_feed": input_feed,
- },
- )
- self.set_incremental_state(incremental_state, "cached_state", cache_state)
-
- # collect outputs across time steps
- x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
-
- # T x B x C -> B x T x C
- x = x.transpose(1, 0)
-
- if hasattr(self, "additional_fc") and self.adaptive_softmax is None:
- x = self.additional_fc(x)
- x = self.dropout_out_module(x)
- # srclen x tgtlen x bsz -> bsz x tgtlen x srclen
- if not self.training and self.need_attn and self.attention is not None:
- assert attn_scores is not None
- attn_scores = attn_scores.transpose(0, 2)
- else:
- attn_scores = None
- return x, attn_scores
-
- def output_layer(self, x):
- """Project features to the vocabulary size."""
- if self.adaptive_softmax is None:
- if self.share_input_output_embed:
- x = F.linear(x, self.embed_tokens.weight)
- else:
- x = self.fc_out(x)
- return x
-
- def get_cached_state(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]:
- cached_state = self.get_incremental_state(incremental_state, "cached_state")
- assert cached_state is not None
- prev_hiddens_ = cached_state["prev_hiddens"]
- assert prev_hiddens_ is not None
- prev_cells_ = cached_state["prev_cells"]
- assert prev_cells_ is not None
- prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)]
- prev_cells = [prev_cells_[j] for j in range(self.num_layers)]
- input_feed = cached_state[
- "input_feed"
- ] # can be None for decoder-only language models
- return prev_hiddens, prev_cells, input_feed
-
- def reorder_incremental_state(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- new_order: Tensor,
- ):
- if incremental_state is None or len(incremental_state) == 0:
- return
- prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state)
- prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens]
- prev_cells = [p.index_select(0, new_order) for p in prev_cells]
- if input_feed is not None:
- input_feed = input_feed.index_select(0, new_order)
- cached_state_new = torch.jit.annotate(
- Dict[str, Optional[Tensor]],
- {
- "prev_hiddens": torch.stack(prev_hiddens),
- "prev_cells": torch.stack(prev_cells),
- "input_feed": input_feed,
- },
- )
-        self.set_incremental_state(incremental_state, "cached_state", cached_state_new)
- return
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- return self.max_target_positions
-
- def make_generation_fast_(self, need_attn=False, **kwargs):
- self.need_attn = need_attn
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.uniform_(m.weight, -0.1, 0.1)
- nn.init.constant_(m.weight[padding_idx], 0)
- return m
-
-
-def LSTM(input_size, hidden_size, **kwargs):
- m = nn.LSTM(input_size, hidden_size, **kwargs)
- for name, param in m.named_parameters():
- if "weight" in name or "bias" in name:
- param.data.uniform_(-0.1, 0.1)
- return m
-
-
-def LSTMCell(input_size, hidden_size, **kwargs):
- m = nn.LSTMCell(input_size, hidden_size, **kwargs)
- for name, param in m.named_parameters():
- if "weight" in name or "bias" in name:
- param.data.uniform_(-0.1, 0.1)
- return m
-
-
-def Linear(in_features, out_features, bias=True, dropout=0.0):
- """Linear layer (input: N x T x C)"""
- m = nn.Linear(in_features, out_features, bias=bias)
- m.weight.data.uniform_(-0.1, 0.1)
- if bias:
- m.bias.data.uniform_(-0.1, 0.1)
- return m
-
-
-@register_model_architecture("lstm", "lstm")
-def base_architecture(args):
- args.dropout = getattr(args, "dropout", 0.1)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
- args.encoder_hidden_size = getattr(
- args, "encoder_hidden_size", args.encoder_embed_dim
- )
- args.encoder_layers = getattr(args, "encoder_layers", 1)
- args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
- args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
- args.decoder_hidden_size = getattr(
- args, "decoder_hidden_size", args.decoder_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 1)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
- args.decoder_attention = getattr(args, "decoder_attention", "1")
- args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.adaptive_softmax_cutoff = getattr(
- args, "adaptive_softmax_cutoff", "10000,50000,200000"
- )
-
-
-@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en")
-def lstm_wiseman_iwslt_de_en(args):
- args.dropout = getattr(args, "dropout", 0.1)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
- args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
- args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
- base_architecture(args)
-
-
-@register_model_architecture("lstm", "lstm_luong_wmt_en_de")
-def lstm_luong_wmt_en_de(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000)
- args.encoder_layers = getattr(args, "encoder_layers", 4)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000)
- args.decoder_layers = getattr(args, "decoder_layers", 4)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0)
- base_architecture(args)
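A minimal sketch (not part of the deleted file above; the import path is assumed to match where it lived in the fairseq tree) of how these architecture functions compose: each named architecture writes its own values first and then calls base_architecture(), whose getattr(args, name, default) calls only fill in attributes that are still missing, so explicit CLI flags and the named architecture both take precedence over the base defaults.

from argparse import Namespace
from fairseq.models.lstm import lstm_wiseman_iwslt_de_en  # assumed import path

args = Namespace(dropout=0.3)        # pretend --dropout 0.3 came from the CLI
lstm_wiseman_iwslt_de_en(args)

print(args.dropout)            # 0.3 -- the explicit value is kept
print(args.encoder_embed_dim)  # 256 -- set by lstm_wiseman_iwslt_de_en
print(args.decoder_layers)     # 1   -- filled in by base_architecture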
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py
deleted file mode 100644
index c593ea5f1842794bfcc952fc93c679a5f16aeb98..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/models/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .discriminative_reranking_model import DiscriminativeNMTReranker
-
-
-__all__ = [
- "DiscriminativeNMTReranker",
-]
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py
deleted file mode 100644
index b41bfbe38789ba14e6a5ea938c75d761424c00ab..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import argparse
-import glob
-
-import numpy as np
-
-
-DIM = 1024
-
-
-def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
- target_ids = [tid for tid in target_embs]
- source_mat = np.stack(source_embs.values(), axis=0)
- normalized_source_mat = source_mat / np.linalg.norm(
- source_mat, axis=1, keepdims=True
- )
- target_mat = np.stack(target_embs.values(), axis=0)
- normalized_target_mat = target_mat / np.linalg.norm(
- target_mat, axis=1, keepdims=True
- )
- sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
- if return_sim_mat:
- return sim_mat
- neighbors_map = {}
- for i, sentence_id in enumerate(source_embs):
- idx = np.argsort(sim_mat[i, :])[::-1][:k]
- neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
- return neighbors_map
-
-
-def load_embeddings(directory, LANGS):
- sentence_embeddings = {}
- sentence_texts = {}
- for lang in LANGS:
- sentence_embeddings[lang] = {}
- sentence_texts[lang] = {}
- lang_dir = f"{directory}/{lang}"
- embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
- for embed_file in embedding_files:
- shard_id = embed_file.split(".")[-1]
- embeddings = np.fromfile(embed_file, dtype=np.float32)
- num_rows = embeddings.shape[0] // DIM
- embeddings = embeddings.reshape((num_rows, DIM))
-
- with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
- for idx, line in enumerate(sentence_file):
- sentence_id, sentence = line.strip().split("\t")
- sentence_texts[lang][sentence_id] = sentence
- sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
-
- return sentence_embeddings, sentence_texts
-
-
-def compute_accuracy(directory, LANGS):
- sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
-
- top_1_accuracy = {}
-
- top1_str = " ".join(LANGS) + "\n"
- for source_lang in LANGS:
- top_1_accuracy[source_lang] = {}
- top1_str += f"{source_lang} "
- for target_lang in LANGS:
- top1 = 0
- top5 = 0
- neighbors_map = compute_dist(
- sentence_embeddings[source_lang], sentence_embeddings[target_lang]
- )
- for sentence_id, neighbors in neighbors_map.items():
- if sentence_id == neighbors[0]:
- top1 += 1
- if sentence_id in neighbors[:5]:
- top5 += 1
- n = len(sentence_embeddings[target_lang])
- top1_str += f"{top1/n} "
- top1_str += "\n"
-
- print(top1_str)
-    with open(f"{directory}/accuracy", "w") as out_file:
-        print(top1_str, file=out_file)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Analyze encoder outputs")
- parser.add_argument("directory", help="Source language corpus")
- parser.add_argument("--langs", help="List of langs")
- args = parser.parse_args()
- langs = args.langs.split(",")
- compute_accuracy(args.directory, langs)
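A self-contained sketch of the retrieval step that compute_dist() performs above: both embedding matrices are L2-normalised, a dot product gives cosine similarities, and the top-k most similar target rows are taken per source row. Shapes and data below are made up.

import numpy as np

source = np.random.rand(4, 1024).astype(np.float32)   # 4 source-language sentence embeddings
target = np.random.rand(6, 1024).astype(np.float32)   # 6 target-language sentence embeddings

source /= np.linalg.norm(source, axis=1, keepdims=True)
target /= np.linalg.norm(target, axis=1, keepdims=True)

sim = source @ target.T                            # (4, 6) cosine similarity matrix
top_k = np.argsort(sim, axis=1)[:, ::-1][:, :5]    # indices of the 5 nearest targets per source
print(top_k)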
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py
deleted file mode 100644
index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_multihead_attention.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-
-from .multihead_attention import MultiheadAttention
-
-
-class SparseMultiheadAttention(MultiheadAttention):
- """Sparse Multi-Headed Attention.
-
- "Generating Long Sequences with Sparse Transformers". Implements
- fixed factorized self attention, where l=stride and c=expressivity.
- A(1) includes all words in the stride window and A(2) takes a summary of c
- words from the end of each stride window.
- If is_bidirectional=False, we do not include any words past the current word,
- as in the paper.
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- add_bias_kv=False,
- add_zero_attn=False,
- self_attention=False,
- encoder_decoder_attention=False,
- stride=32,
- expressivity=8,
- is_bidirectional=True,
- ):
-
- super().__init__(
- embed_dim,
- num_heads,
- kdim,
- vdim,
- dropout,
- bias,
- add_bias_kv,
- add_zero_attn,
- self_attention,
- encoder_decoder_attention,
- )
-
- self.is_bidirectional = is_bidirectional
- self.stride = stride
- self.expressivity = expressivity
- assert self.stride > 0 and self.stride >= self.expressivity
-
- # Used for Ai(2) calculations - beginning of [l-c, l] range
- def compute_checkpoint(self, word_index):
- if word_index % self.stride == 0 and word_index != 0:
- checkpoint_index = word_index - self.expressivity
- else:
- checkpoint_index = (
- math.floor(word_index / self.stride) * self.stride
- + self.stride
- - self.expressivity
- )
- return checkpoint_index
-
- # Computes Ai(2)
- def compute_subset_summaries(self, absolute_max):
- checkpoint_index = self.compute_checkpoint(0)
- subset_two = set()
- while checkpoint_index <= absolute_max - 1:
- summary = set(
- range(
- checkpoint_index,
- min(checkpoint_index + self.expressivity + 1, absolute_max),
- )
- )
- subset_two = subset_two.union(summary)
- checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride)
- return subset_two
-
- # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf
- def compute_fixed_attention_subset(self, word_index, tgt_len):
- # +1s account for range function; [min, max) -> [min, max]
- if not self.is_bidirectional:
- absolute_max = word_index + 1
- else:
- absolute_max = tgt_len
-
- # Subset 1 - whole window
- rounded_index = (
- math.floor((word_index + self.stride) / self.stride) * self.stride
- )
- if word_index % self.stride == 0 and word_index != 0:
- subset_one = set(
- range(word_index - self.stride, min(absolute_max, word_index + 1))
- )
- else:
- subset_one = set(
- range(
- max(0, rounded_index - self.stride),
- min(absolute_max, rounded_index + 1),
- )
- )
-
- # Subset 2 - summary per window
- # If bidirectional, subset 2 is the same for every index
- subset_two = set()
- if not self.is_bidirectional:
- subset_two = self.compute_subset_summaries(absolute_max)
-
- return subset_one.union(subset_two)
-
- # Compute sparse mask - if bidirectional, can pre-compute and store
- def buffered_sparse_mask(self, tensor, tgt_len, src_len):
- assert tgt_len > self.stride
- sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf"))
-
- # If bidirectional, subset 2 is the same for every index
- subset_summaries = set()
- if self.is_bidirectional:
- subset_summaries = self.compute_subset_summaries(tgt_len)
-
- for i in range(tgt_len):
- fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
- fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
- included_word_indices = torch.LongTensor(list(fixed_attention_subset))
- sparse_mask[i].index_fill_(0, included_word_indices, 0)
- return sparse_mask.type_as(tensor)
-
- def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
- sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
- sparse_mask = sparse_mask.unsqueeze(0).expand(
- bsz * self.num_heads, tgt_len, src_len
- )
- attn_weights += sparse_mask
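For reference, a small standalone illustration of the fixed factorized pattern described in the class docstring above (this re-derives the pattern from the Sparse Transformers paper rather than calling the class): with stride l and expressivity c in the causal case, position i attends to its own stride window, A(1), plus the last c positions of every earlier window, A(2).

l, c, T = 4, 1, 8   # stride, expressivity, toy sequence length
for i in range(T):
    window_start = (i // l) * l
    a1 = set(range(window_start, i + 1))                                   # A(1): current window, causal
    a2 = {w * l + off for w in range(i // l) for off in range(l - c, l)}   # A(2): summaries of past windows
    print(i, sorted(a1 | a2))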
diff --git a/spaces/Omnibus/MusicGen/tests/modules/test_rope.py b/spaces/Omnibus/MusicGen/tests/modules/test_rope.py
deleted file mode 100644
index 067c6f067acbf27fb0fef5c2b812c22474c4fcd0..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/MusicGen/tests/modules/test_rope.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-from audiocraft.modules.rope import RotaryEmbedding
-from audiocraft.modules.transformer import StreamingTransformer, set_efficient_attention_backend
-
-
-def test_rope():
- set_efficient_attention_backend('xformers')
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert list(xq_out.shape) == [B, T, H, C]
- assert list(xk_out.shape) == [B, T, H, C]
-
-
-def test_rope_io_dtypes():
- set_efficient_attention_backend('xformers')
- B, T, H, C = 8, 75, 16, 128
-
- rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32)
- rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64)
-
- # Test bfloat16 inputs w/ both 32 and 64 precision rope.
- xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
- xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
- xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16)
- assert xq_out.dtype == torch.bfloat16
- xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16)
- assert xq_out.dtype == torch.bfloat16
-
- # Test float32 inputs w/ both 32 and 64 precision rope.
- xq_32 = torch.rand((B, T, H, C)).to(torch.float32)
- xk_32 = torch.rand((B, T, H, C)).to(torch.float32)
- xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32)
- assert xq_out.dtype == torch.float32
- xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32)
- assert xq_out.dtype == torch.float32
-
-
-def test_transformer_with_rope():
- set_efficient_attention_backend('xformers')
- torch.manual_seed(1234)
- for pos in ['rope', 'sin_rope']:
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
- positional_embedding=pos)
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- out = tr(x)
- assert list(out.shape) == list(x.shape)
-
-
-@torch.no_grad()
-def test_rope_streaming():
- set_efficient_attention_backend('xformers')
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, causal=True, dropout=0.,
- custom=True, positional_embedding='rope')
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- ref = tr(x)
-
- with tr.streaming():
- outs = []
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr(frame))
-
- out = torch.cat(outs, dim=1)
- assert list(out.shape) == [3, steps, 16]
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-@torch.no_grad()
-def test_rope_streaming_past_context():
- set_efficient_attention_backend('xformers')
- torch.manual_seed(1234)
-
- for context in [None, 10]:
- tr = StreamingTransformer(
- 16, 4, 1 if context else 2,
- causal=True, past_context=context, custom=True,
- dropout=0., positional_embedding='rope')
- tr.eval()
-
- steps = 20
- x = torch.randn(3, steps, 16)
- ref = tr(x)
-
- with tr.streaming():
- outs = []
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr(frame))
-
- out = torch.cat(outs, dim=1)
- assert list(out.shape) == [3, steps, 16]
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-def test_rope_memory_efficient():
- set_efficient_attention_backend('xformers')
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
- positional_embedding='rope')
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1,
- positional_embedding='rope')
- tr_mem_efficient.load_state_dict(tr.state_dict())
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_mem_efficient(x)
- # Check at float precision b/c this is the rope default.
- assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm()
-
-
-def test_rope_with_xpos():
- set_efficient_attention_backend('xformers')
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C, xpos=True)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert list(xq_out.shape) == [B, T, H, C]
- assert list(xk_out.shape) == [B, T, H, C]
-
-
-def test_positional_scale():
- set_efficient_attention_backend('xformers')
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert torch.allclose(xq, xq_out)
- assert torch.allclose(xk, xk_out)
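As background for these tests, a minimal sketch of what a rotary embedding does to a (B, T, H, C) tensor. This is the generic half-split formulation and only an approximation of audiocraft's RotaryEmbedding (which may pair channels differently): each channel pair is rotated by a position-dependent angle, so the q/k dot product depends only on relative offsets.

import torch

def rotate(x, start=0, base=10000.0):
    """Generic rotary-embedding sketch (half-split variant, see assumptions above)."""
    B, T, H, C = x.shape
    half = C // 2
    freqs = base ** (-torch.arange(half, dtype=torch.float32) / half)         # (half,)
    angles = (start + torch.arange(T, dtype=torch.float32))[:, None] * freqs  # (T, half)
    cos = angles.cos()[None, :, None, :]   # broadcast over batch and heads
    sin = angles.sin()[None, :, None, :]
    x1, x2 = x[..., :half], x[..., half:]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)

xq = torch.rand(2, 5, 4, 8)
assert rotate(xq, start=7).shape == xq.shape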
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py b/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py
deleted file mode 100644
index 78be1b12a2fec4ca43ab9065e99a0a1ba368be5a..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/vertices.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import numpy as np
-
-
-def prepare_vertices(vertices, canonicalize=True):
- data = vertices
- # Swap axis (gravity=Z instead of Y)
- # data = data[..., [2, 0, 1]]
-
- # Make left/right correct
- # data[..., [1]] = -data[..., [1]]
-
- # Center the first root to the first frame
- data -= data[[0], [0], :]
-
- # Remove the floor
- data[..., 2] -= np.min(data[..., 2])
- return data
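A tiny example (made-up shapes and data) of the two normalisations prepare_vertices() applies: recenter the whole sequence on the first frame's first vertex, then shift it so the lowest z coordinate lies on the floor.

import numpy as np

vertices = np.random.rand(10, 100, 3)        # (frames, vertices, xyz), toy mesh
data = vertices - vertices[[0], [0], :]      # center on the first frame's root vertex
data[..., 2] -= np.min(data[..., 2])         # lowest point now sits at z = 0
assert np.isclose(data[..., 2].min(), 0.0)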
diff --git a/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py b/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py
deleted file mode 100644
index a0342a0fd34525ffa7731ddbed4015bb3555651c..0000000000000000000000000000000000000000
--- a/spaces/OptimalScale/Robin-7b/lmflow/datasets/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""This Python code defines a class Dataset with methods for initializing, loading,
-and manipulating datasets from different backends such as Hugging Face and JSON.
-
-The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging
-Face dataset, mapping datasets, and retrieving the backend dataset and arguments.
-"""
-from lmflow.datasets.dataset import Dataset
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py
deleted file mode 100644
index 7567316c566bd3aca6d8f65a84b00e9e890948a7..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/builder.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ..runner import Sequential
-from ..utils import Registry, build_from_cfg
-
-
-def build_model_from_cfg(cfg, registry, default_args=None):
- """Build a PyTorch model from config dict(s). Different from
- ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.
-
- Args:
-        cfg (dict, list[dict]): The config of modules; it is either a config
-            dict or a list of config dicts. If cfg is a list, the built
-            modules will be wrapped with ``nn.Sequential``.
- registry (:obj:`Registry`): A registry the module belongs to.
- default_args (dict, optional): Default arguments to build the module.
- Defaults to None.
-
- Returns:
- nn.Module: A built nn module.
- """
- if isinstance(cfg, list):
- modules = [
- build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
- ]
- return Sequential(*modules)
- else:
- return build_from_cfg(cfg, registry, default_args)
-
-
-MODELS = Registry('model', build_func=build_model_from_cfg)
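A hedged usage sketch for the MODELS registry defined above, assuming mmcv's usual Registry semantics (the 'type' key selects a registered class and the remaining keys become constructor kwargs); TinyHead is a made-up module used only for illustration.

import torch.nn as nn

@MODELS.register_module()
class TinyHead(nn.Module):
    def __init__(self, channels=8):
        super().__init__()
        self.proj = nn.Linear(channels, channels)

single = MODELS.build(dict(type='TinyHead', channels=16))               # one module
stacked = MODELS.build([dict(type='TinyHead'), dict(type='TinyHead')])  # list -> wrapped in Sequential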
diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/__init__.py b/spaces/PKUWilliamYang/StyleGANEX/models/mtcnn/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go
deleted file mode 100644
index 346acd81a76fd816759258e3bcf7661e4b0f7db7..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-60.go and /dev/null differ
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py b/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py
deleted file mode 100644
index 34de352a580dd3a96c7fbf8de3a9bab2bf10ffcb..0000000000000000000000000000000000000000
--- a/spaces/Pattr/DrumClassification/lilypond-2.24.2/share/lilypond/2.24.2/python/lilylib.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# This file is part of LilyPond, the GNU music typesetter.
-#
-# Copyright (C) 1998--2022 Han-Wen Nienhuys
-# Jan Nieuwenhuizen
-#
-# LilyPond is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# LilyPond is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with LilyPond.  If not, see <http://www.gnu.org/licenses/>.
-
-import __main__
-import codecs
-import gettext
-import optparse
-import os
-import sys
-
-sys.stdin = codecs.getreader('utf-8')(sys.stdin.detach())
-sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
-sys.stderr = codecs.getwriter('utf-8')(sys.stderr.detach())
-
-# Lilylib globals.
-program_name = os.path.basename(sys.argv[0])
-
-# Logging framework: We have the following output functions:
-# error
-# warning
-# progress
-# debug
-
-# TODO: use the standard logging module
-_loglevels = {"NONE": 0, "ERROR": 1, "WARN": 2,
- "BASIC": 3, "PROGRESS": 4, "INFO": 5, "DEBUG": 6}
-
-_loglevel = _loglevels["PROGRESS"]
-
-
-def set_loglevel(l):
- global _loglevel
- newlevel = _loglevels.get(l, -1)
- if newlevel >= 0:
- debug_output(_("Setting loglevel to %s") % l)
- _loglevel = newlevel
- else:
- error(_("Unknown or invalid loglevel '%s'") % l)
-
-
-def handle_loglevel_option(option, opt_str, value, parser, *args):
- if value:
- set_loglevel(value)
- elif args:
- set_loglevel(args[0])
-
-
-def _is_loglevel(l):
- global _loglevel
- return _loglevel >= _loglevels[l]
-
-
-def is_verbose():
- return _is_loglevel("DEBUG")
-
-
-def _print_logmessage(level, s, fullmessage=True, newline=True):
- if _is_loglevel(level):
- if fullmessage:
- s = program_name + ": " + s + "\n"
- elif newline:
- s += '\n'
- sys.stderr.write(s)
- sys.stderr.flush()
-
-
-def error(s):
- _print_logmessage("ERROR", _("error: %s") % s)
-
-
-def warning(s):
- _print_logmessage("WARN", _("warning: %s") % s)
-
-
-def progress(s, fullmessage=False, newline=True):
- _print_logmessage("PROGRESS", s, fullmessage, newline)
-
-
-def debug_output(s, fullmessage=False, newline=True):
- _print_logmessage("DEBUG", s, fullmessage, newline)
-
-
-class _NonDentedHeadingFormatter (optparse.IndentedHelpFormatter):
- def format_heading(self, heading):
- if heading:
- return heading[0].upper() + heading[1:] + ':\n'
- return ''
-
- def format_option_strings(self, option):
- sep = ' '
- if option._short_opts and option._long_opts:
- sep = ','
-
- metavar = ''
- if option.takes_value():
-            metavar = '=%s' % (option.metavar or option.dest.upper())
-
- return "%3s%s %s%s" % (" ".join(option._short_opts),
- sep,
- " ".join(option._long_opts),
- metavar)
-
- # Only use one level of indentation (even for groups and nested groups),
- # since we don't indent the headings, either
- def indent(self):
- self.current_indent = self.indent_increment
- self.level += 1
-
- def dedent(self):
- self.level -= 1
- if self.level <= 0:
-            self.current_indent = 0
- self.level = 0
-
- def format_usage(self, usage):
- return _("Usage: %s") % usage + '\n'
-
- def format_description(self, description):
- return description
-
-
-def get_option_parser(*args, **kwargs):
- p = optparse.OptionParser(*args, **kwargs)
- p.formatter = _NonDentedHeadingFormatter()
- p.formatter.set_parser(p)
- return p
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
deleted file mode 100644
index be777123a886503172a95fe0719e956a147bbd68..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='EncHead',
- in_channels=[512, 1024, 2048],
- in_index=(1, 2, 3),
- channels=512,
- num_codes=32,
- use_se_loss=True,
- add_lateral=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_se_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py
deleted file mode 100644
index bf780a1b6f6521833c6a5859675147824efa599d..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/schedules/schedule_20k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=20000)
-checkpoint_config = dict(by_epoch=False, interval=2000)
-evaluation = dict(interval=2000, metric='mIoU')
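For context, a hedged sketch of how _base_ fragments like the encnet model and 20k-iteration schedule above are typically consumed in mmcv/mmseg-style configs: a concrete experiment config lists them under _base_ and the config loader merges their dicts, with local keys overriding the merged values. Paths and num_classes below are illustrative.

_base_ = [
    '../_base_/models/encnet_r50-d8.py',
    '../_base_/schedules/schedule_20k.py',
]
# Local overrides win over the merged base config, e.g. retargeting both heads:
model = dict(decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))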
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py
deleted file mode 100644
index d3f101f5a29ff85271e44e4f27545168a8f27baa..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/grids/musicgen/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""MusicGen grids."""
diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py
deleted file mode 100644
index 0d43bb135adde38d94bf18a7e5edaa4523cd95cf..0000000000000000000000000000000000000000
--- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/transformer/permuter.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-
-
-class AbstractPermuter(nn.Module):
- def __init__(self, *args, **kwargs):
- super().__init__()
- def forward(self, x, reverse=False):
- raise NotImplementedError
-
-
-class Identity(AbstractPermuter):
- def __init__(self):
- super().__init__()
-
- def forward(self, x, reverse=False):
- return x
-
-
-class Subsample(AbstractPermuter):
- def __init__(self, H, W):
- super().__init__()
- C = 1
- indices = np.arange(H*W).reshape(C,H,W)
- while min(H, W) > 1:
- indices = indices.reshape(C,H//2,2,W//2,2)
- indices = indices.transpose(0,2,4,1,3)
- indices = indices.reshape(C*4,H//2, W//2)
- H = H//2
- W = W//2
- C = C*4
- assert H == W == 1
- idx = torch.tensor(indices.ravel())
- self.register_buffer('forward_shuffle_idx',
- nn.Parameter(idx, requires_grad=False))
- self.register_buffer('backward_shuffle_idx',
- nn.Parameter(torch.argsort(idx), requires_grad=False))
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-def mortonify(i, j):
- """(i,j) index to linear morton code"""
- i = np.uint64(i)
- j = np.uint64(j)
-
- z = np.uint(0)
-
- for pos in range(32):
- z = (z |
- ((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) |
- ((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1))
- )
- return z
-
-
-class ZCurve(AbstractPermuter):
- def __init__(self, H, W):
- super().__init__()
- reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)]
- idx = np.argsort(reverseidx)
- idx = torch.tensor(idx)
- reverseidx = torch.tensor(reverseidx)
- self.register_buffer('forward_shuffle_idx',
- idx)
- self.register_buffer('backward_shuffle_idx',
- reverseidx)
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-class SpiralOut(AbstractPermuter):
- def __init__(self, H, W):
- super().__init__()
- assert H == W
- size = W
- indices = np.arange(size*size).reshape(size,size)
-
- i0 = size//2
- j0 = size//2-1
-
- i = i0
- j = j0
-
- idx = [indices[i0, j0]]
- step_mult = 0
- for c in range(1, size//2+1):
- step_mult += 1
- # steps left
- for k in range(step_mult):
- i = i - 1
- j = j
- idx.append(indices[i, j])
-
- # step down
- for k in range(step_mult):
- i = i
- j = j + 1
- idx.append(indices[i, j])
-
- step_mult += 1
- if c < size//2:
- # step right
- for k in range(step_mult):
- i = i + 1
- j = j
- idx.append(indices[i, j])
-
- # step up
- for k in range(step_mult):
- i = i
- j = j - 1
- idx.append(indices[i, j])
- else:
- # end reached
- for k in range(step_mult-1):
- i = i + 1
- idx.append(indices[i, j])
-
- assert len(idx) == size*size
- idx = torch.tensor(idx)
- self.register_buffer('forward_shuffle_idx', idx)
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-class SpiralIn(AbstractPermuter):
- def __init__(self, H, W):
- super().__init__()
- assert H == W
- size = W
- indices = np.arange(size*size).reshape(size,size)
-
- i0 = size//2
- j0 = size//2-1
-
- i = i0
- j = j0
-
- idx = [indices[i0, j0]]
- step_mult = 0
- for c in range(1, size//2+1):
- step_mult += 1
- # steps left
- for k in range(step_mult):
- i = i - 1
- j = j
- idx.append(indices[i, j])
-
- # step down
- for k in range(step_mult):
- i = i
- j = j + 1
- idx.append(indices[i, j])
-
- step_mult += 1
- if c < size//2:
- # step right
- for k in range(step_mult):
- i = i + 1
- j = j
- idx.append(indices[i, j])
-
- # step up
- for k in range(step_mult):
- i = i
- j = j - 1
- idx.append(indices[i, j])
- else:
- # end reached
- for k in range(step_mult-1):
- i = i + 1
- idx.append(indices[i, j])
-
- assert len(idx) == size*size
- idx = idx[::-1]
- idx = torch.tensor(idx)
- self.register_buffer('forward_shuffle_idx', idx)
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-class Random(nn.Module):
- def __init__(self, H, W):
- super().__init__()
- indices = np.random.RandomState(1).permutation(H*W)
- idx = torch.tensor(indices.ravel())
- self.register_buffer('forward_shuffle_idx', idx)
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-class AlternateParsing(AbstractPermuter):
- def __init__(self, H, W):
- super().__init__()
- indices = np.arange(W*H).reshape(H,W)
- for i in range(1, H, 2):
- indices[i, :] = indices[i, ::-1]
- idx = indices.flatten()
- assert len(idx) == H*W
- idx = torch.tensor(idx)
- self.register_buffer('forward_shuffle_idx', idx)
- self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
-
- def forward(self, x, reverse=False):
- if not reverse:
- return x[:, self.forward_shuffle_idx]
- else:
- return x[:, self.backward_shuffle_idx]
-
-
-if __name__ == "__main__":
- p0 = AlternateParsing(16, 16)
- print(p0.forward_shuffle_idx)
- print(p0.backward_shuffle_idx)
-
- x = torch.randint(0, 768, size=(11, 256))
- y = p0(x)
- xre = p0(y, reverse=True)
- assert torch.equal(x, xre)
-
- p1 = SpiralOut(2, 2)
- print(p1.forward_shuffle_idx)
- print(p1.backward_shuffle_idx)
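A quick usage sketch for the permuters above: flatten a batch of token grids into the chosen scan order and invert the reordering with reverse=True. Grid size and token ids are arbitrary.

import torch

perm = ZCurve(H=4, W=4)
tokens = torch.arange(16).unsqueeze(0)     # one 4x4 grid of token ids, flattened row-major
scanned = perm(tokens)                     # reordered along the z-curve
restored = perm(scanned, reverse=True)
assert torch.equal(tokens, restored)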
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py
deleted file mode 100644
index cf2b976f377c2656afb3d84add8d30b0fc280c03..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/cli/spinners.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import contextlib
-import itertools
-import logging
-import sys
-import time
-from typing import IO, Generator, Optional
-
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.logging import get_indentation
-
-logger = logging.getLogger(__name__)
-
-
-class SpinnerInterface:
- def spin(self) -> None:
- raise NotImplementedError()
-
- def finish(self, final_status: str) -> None:
- raise NotImplementedError()
-
-
-class InteractiveSpinner(SpinnerInterface):
- def __init__(
- self,
- message: str,
- file: Optional[IO[str]] = None,
- spin_chars: str = "-\\|/",
- # Empirically, 8 updates/second looks nice
- min_update_interval_seconds: float = 0.125,
- ):
- self._message = message
- if file is None:
- file = sys.stdout
- self._file = file
- self._rate_limiter = RateLimiter(min_update_interval_seconds)
- self._finished = False
-
- self._spin_cycle = itertools.cycle(spin_chars)
-
- self._file.write(" " * get_indentation() + self._message + " ... ")
- self._width = 0
-
- def _write(self, status: str) -> None:
- assert not self._finished
- # Erase what we wrote before by backspacing to the beginning, writing
- # spaces to overwrite the old text, and then backspacing again
- backup = "\b" * self._width
- self._file.write(backup + " " * self._width + backup)
- # Now we have a blank slate to add our status
- self._file.write(status)
- self._width = len(status)
- self._file.flush()
- self._rate_limiter.reset()
-
- def spin(self) -> None:
- if self._finished:
- return
- if not self._rate_limiter.ready():
- return
- self._write(next(self._spin_cycle))
-
- def finish(self, final_status: str) -> None:
- if self._finished:
- return
- self._write(final_status)
- self._file.write("\n")
- self._file.flush()
- self._finished = True
-
-
-# Used for dumb terminals, non-interactive installs (no tty), etc.
-# We still print updates occasionally (once every 60 seconds by default) to
-# act as a keep-alive for systems like Travis-CI that take lack-of-output as
-# an indication that a task has frozen.
-class NonInteractiveSpinner(SpinnerInterface):
- def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None:
- self._message = message
- self._finished = False
- self._rate_limiter = RateLimiter(min_update_interval_seconds)
- self._update("started")
-
- def _update(self, status: str) -> None:
- assert not self._finished
- self._rate_limiter.reset()
- logger.info("%s: %s", self._message, status)
-
- def spin(self) -> None:
- if self._finished:
- return
- if not self._rate_limiter.ready():
- return
- self._update("still running...")
-
- def finish(self, final_status: str) -> None:
- if self._finished:
- return
- self._update(f"finished with status '{final_status}'")
- self._finished = True
-
-
-class RateLimiter:
- def __init__(self, min_update_interval_seconds: float) -> None:
- self._min_update_interval_seconds = min_update_interval_seconds
- self._last_update: float = 0
-
- def ready(self) -> bool:
- now = time.time()
- delta = now - self._last_update
- return delta >= self._min_update_interval_seconds
-
- def reset(self) -> None:
- self._last_update = time.time()
-
-
-@contextlib.contextmanager
-def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
- # Interactive spinner goes directly to sys.stdout rather than being routed
- # through the logging system, but it acts like it has level INFO,
- # i.e. it's only displayed if we're at level INFO or better.
- # Non-interactive spinner goes through the logging system, so it is always
- # in sync with logging configuration.
- if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
- spinner: SpinnerInterface = InteractiveSpinner(message)
- else:
- spinner = NonInteractiveSpinner(message)
- try:
- with hidden_cursor(sys.stdout):
- yield spinner
- except KeyboardInterrupt:
- spinner.finish("canceled")
- raise
- except Exception:
- spinner.finish("error")
- raise
- else:
- spinner.finish("done")
-
-
-HIDE_CURSOR = "\x1b[?25l"
-SHOW_CURSOR = "\x1b[?25h"
-
-
-@contextlib.contextmanager
-def hidden_cursor(file: IO[str]) -> Generator[None, None, None]:
- # The Windows terminal does not support the hide/show cursor ANSI codes,
- # even via colorama. So don't even try.
- if WINDOWS:
- yield
- # We don't want to clutter the output with control characters if we're
- # writing to a file, or if the user is running with --quiet.
- # See https://github.com/pypa/pip/issues/3418
- elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
- yield
- else:
- file.write(HIDE_CURSOR)
- try:
- yield
- finally:
- file.write(SHOW_CURSOR)
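A hypothetical caller of open_spinner() above (this is pip-internal machinery, so the snippet only illustrates the contract, not a public API): wrap a long-running step, call spin() as work progresses, and the context manager reports "done", "canceled", or "error" on exit.

import time

with open_spinner("Resolving dependencies") as spinner:
    for _ in range(20):
        time.sleep(0.2)      # stand-in for real work
        spinner.spin()
# On a tty this animates "- \ | /"; otherwise it logs periodic keep-alive lines.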
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py
deleted file mode 100644
index dace718c1b5fab7b90ed5d77283a9f907b78b4e9..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/syntax.py
+++ /dev/null
@@ -1,934 +0,0 @@
-import os.path
-import platform
-import re
-import sys
-import textwrap
-from abc import ABC, abstractmethod
-from typing import (
- Any,
- Dict,
- Iterable,
- List,
- NamedTuple,
- Optional,
- Sequence,
- Set,
- Tuple,
- Type,
- Union,
-)
-
-from pip._vendor.pygments.lexer import Lexer
-from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
-from pip._vendor.pygments.style import Style as PygmentsStyle
-from pip._vendor.pygments.styles import get_style_by_name
-from pip._vendor.pygments.token import (
- Comment,
- Error,
- Generic,
- Keyword,
- Name,
- Number,
- Operator,
- String,
- Token,
- Whitespace,
-)
-from pip._vendor.pygments.util import ClassNotFound
-
-from pip._vendor.rich.containers import Lines
-from pip._vendor.rich.padding import Padding, PaddingDimensions
-
-from ._loop import loop_first
-from .color import Color, blend_rgb
-from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
-from .jupyter import JupyterMixin
-from .measure import Measurement
-from .segment import Segment, Segments
-from .style import Style, StyleType
-from .text import Text
-
-TokenType = Tuple[str, ...]
-
-WINDOWS = platform.system() == "Windows"
-DEFAULT_THEME = "monokai"
-
-# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py
-# A few modifications were made
-
-ANSI_LIGHT: Dict[TokenType, Style] = {
- Token: Style(),
- Whitespace: Style(color="white"),
- Comment: Style(dim=True),
- Comment.Preproc: Style(color="cyan"),
- Keyword: Style(color="blue"),
- Keyword.Type: Style(color="cyan"),
- Operator.Word: Style(color="magenta"),
- Name.Builtin: Style(color="cyan"),
- Name.Function: Style(color="green"),
- Name.Namespace: Style(color="cyan", underline=True),
- Name.Class: Style(color="green", underline=True),
- Name.Exception: Style(color="cyan"),
- Name.Decorator: Style(color="magenta", bold=True),
- Name.Variable: Style(color="red"),
- Name.Constant: Style(color="red"),
- Name.Attribute: Style(color="cyan"),
- Name.Tag: Style(color="bright_blue"),
- String: Style(color="yellow"),
- Number: Style(color="blue"),
- Generic.Deleted: Style(color="bright_red"),
- Generic.Inserted: Style(color="green"),
- Generic.Heading: Style(bold=True),
- Generic.Subheading: Style(color="magenta", bold=True),
- Generic.Prompt: Style(bold=True),
- Generic.Error: Style(color="bright_red"),
- Error: Style(color="red", underline=True),
-}
-
-ANSI_DARK: Dict[TokenType, Style] = {
- Token: Style(),
- Whitespace: Style(color="bright_black"),
- Comment: Style(dim=True),
- Comment.Preproc: Style(color="bright_cyan"),
- Keyword: Style(color="bright_blue"),
- Keyword.Type: Style(color="bright_cyan"),
- Operator.Word: Style(color="bright_magenta"),
- Name.Builtin: Style(color="bright_cyan"),
- Name.Function: Style(color="bright_green"),
- Name.Namespace: Style(color="bright_cyan", underline=True),
- Name.Class: Style(color="bright_green", underline=True),
- Name.Exception: Style(color="bright_cyan"),
- Name.Decorator: Style(color="bright_magenta", bold=True),
- Name.Variable: Style(color="bright_red"),
- Name.Constant: Style(color="bright_red"),
- Name.Attribute: Style(color="bright_cyan"),
- Name.Tag: Style(color="bright_blue"),
- String: Style(color="yellow"),
- Number: Style(color="bright_blue"),
- Generic.Deleted: Style(color="bright_red"),
- Generic.Inserted: Style(color="bright_green"),
- Generic.Heading: Style(bold=True),
- Generic.Subheading: Style(color="bright_magenta", bold=True),
- Generic.Prompt: Style(bold=True),
- Generic.Error: Style(color="bright_red"),
- Error: Style(color="red", underline=True),
-}
-
-RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK}
-NUMBERS_COLUMN_DEFAULT_PADDING = 2
-
-
-class SyntaxTheme(ABC):
- """Base class for a syntax theme."""
-
- @abstractmethod
- def get_style_for_token(self, token_type: TokenType) -> Style:
- """Get a style for a given Pygments token."""
- raise NotImplementedError # pragma: no cover
-
- @abstractmethod
- def get_background_style(self) -> Style:
- """Get the background color."""
- raise NotImplementedError # pragma: no cover
-
-
-class PygmentsSyntaxTheme(SyntaxTheme):
- """Syntax theme that delegates to Pygments theme."""
-
- def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
- self._style_cache: Dict[TokenType, Style] = {}
- if isinstance(theme, str):
- try:
- self._pygments_style_class = get_style_by_name(theme)
- except ClassNotFound:
- self._pygments_style_class = get_style_by_name("default")
- else:
- self._pygments_style_class = theme
-
- self._background_color = self._pygments_style_class.background_color
- self._background_style = Style(bgcolor=self._background_color)
-
- def get_style_for_token(self, token_type: TokenType) -> Style:
- """Get a style from a Pygments class."""
- try:
- return self._style_cache[token_type]
- except KeyError:
- try:
- pygments_style = self._pygments_style_class.style_for_token(token_type)
- except KeyError:
- style = Style.null()
- else:
- color = pygments_style["color"]
- bgcolor = pygments_style["bgcolor"]
- style = Style(
- color="#" + color if color else "#000000",
- bgcolor="#" + bgcolor if bgcolor else self._background_color,
- bold=pygments_style["bold"],
- italic=pygments_style["italic"],
- underline=pygments_style["underline"],
- )
- self._style_cache[token_type] = style
- return style
-
- def get_background_style(self) -> Style:
- return self._background_style
-
-
-class ANSISyntaxTheme(SyntaxTheme):
- """Syntax theme to use standard colors."""
-
- def __init__(self, style_map: Dict[TokenType, Style]) -> None:
- self.style_map = style_map
- self._missing_style = Style.null()
- self._background_style = Style.null()
- self._style_cache: Dict[TokenType, Style] = {}
-
- def get_style_for_token(self, token_type: TokenType) -> Style:
- """Look up style in the style map."""
- try:
- return self._style_cache[token_type]
- except KeyError:
- # Styles form a hierarchy
- # We need to go from most to least specific
- # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",)
- get_style = self.style_map.get
- token = tuple(token_type)
- style = self._missing_style
- while token:
- _style = get_style(token)
- if _style is not None:
- style = _style
- break
- token = token[:-1]
- self._style_cache[token_type] = style
- return style
-
- def get_background_style(self) -> Style:
- return self._background_style
-
-
-SyntaxPosition = Tuple[int, int]
-
-
-class _SyntaxHighlightRange(NamedTuple):
- """
- A range to highlight in a Syntax object.
- `start` and `end` are 2-integers tuples, where the first integer is the line number
- (starting from 1) and the second integer is the column index (starting from 0).
- """
-
- style: StyleType
- start: SyntaxPosition
- end: SyntaxPosition
-
-
-class Syntax(JupyterMixin):
- """Construct a Syntax object to render syntax highlighted code.
-
- Args:
- code (str): Code to highlight.
- lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/)
- theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
- dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
- line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
- start_line (int, optional): Starting number for line numbers. Defaults to 1.
- line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render.
- A value of None in the tuple indicates the range is open in that direction.
- highlight_lines (Set[int]): A set of line numbers to highlight.
- code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
- tab_size (int, optional): Size of tabs. Defaults to 4.
- word_wrap (bool, optional): Enable word wrapping.
- background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
- indent_guides (bool, optional): Show indent guides. Defaults to False.
- padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
- """
-
- _pygments_style_class: Type[PygmentsStyle]
- _theme: SyntaxTheme
-
- @classmethod
- def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme:
- """Get a syntax theme instance."""
- if isinstance(name, SyntaxTheme):
- return name
- theme: SyntaxTheme
- if name in RICH_SYNTAX_THEMES:
- theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name])
- else:
- theme = PygmentsSyntaxTheme(name)
- return theme
-
- def __init__(
- self,
- code: str,
- lexer: Union[Lexer, str],
- *,
- theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
- dedent: bool = False,
- line_numbers: bool = False,
- start_line: int = 1,
- line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
- highlight_lines: Optional[Set[int]] = None,
- code_width: Optional[int] = None,
- tab_size: int = 4,
- word_wrap: bool = False,
- background_color: Optional[str] = None,
- indent_guides: bool = False,
- padding: PaddingDimensions = 0,
- ) -> None:
- self.code = code
- self._lexer = lexer
- self.dedent = dedent
- self.line_numbers = line_numbers
- self.start_line = start_line
- self.line_range = line_range
- self.highlight_lines = highlight_lines or set()
- self.code_width = code_width
- self.tab_size = tab_size
- self.word_wrap = word_wrap
- self.background_color = background_color
- self.background_style = (
- Style(bgcolor=background_color) if background_color else Style()
- )
- self.indent_guides = indent_guides
- self.padding = padding
-
- self._theme = self.get_theme(theme)
- self._stylized_ranges: List[_SyntaxHighlightRange] = []
-
- @classmethod
- def from_path(
- cls,
- path: str,
- encoding: str = "utf-8",
- lexer: Optional[Union[Lexer, str]] = None,
- theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
- dedent: bool = False,
- line_numbers: bool = False,
- line_range: Optional[Tuple[int, int]] = None,
- start_line: int = 1,
- highlight_lines: Optional[Set[int]] = None,
- code_width: Optional[int] = None,
- tab_size: int = 4,
- word_wrap: bool = False,
- background_color: Optional[str] = None,
- indent_guides: bool = False,
- padding: PaddingDimensions = 0,
- ) -> "Syntax":
- """Construct a Syntax object from a file.
-
- Args:
- path (str): Path to file to highlight.
- encoding (str): Encoding of file.
- lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content.
-            theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
- dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True.
- line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
- start_line (int, optional): Starting number for line numbers. Defaults to 1.
- line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
- highlight_lines (Set[int]): A set of line numbers to highlight.
- code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
- tab_size (int, optional): Size of tabs. Defaults to 4.
- word_wrap (bool, optional): Enable word wrapping of code.
- background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
- indent_guides (bool, optional): Show indent guides. Defaults to False.
- padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
-
- Returns:
- [Syntax]: A Syntax object that may be printed to the console
- """
- with open(path, "rt", encoding=encoding) as code_file:
- code = code_file.read()
-
- if not lexer:
- lexer = cls.guess_lexer(path, code=code)
-
- return cls(
- code,
- lexer,
- theme=theme,
- dedent=dedent,
- line_numbers=line_numbers,
- line_range=line_range,
- start_line=start_line,
- highlight_lines=highlight_lines,
- code_width=code_width,
- tab_size=tab_size,
- word_wrap=word_wrap,
- background_color=background_color,
- indent_guides=indent_guides,
- padding=padding,
- )
-
- @classmethod
- def guess_lexer(cls, path: str, code: Optional[str] = None) -> str:
- """Guess the alias of the Pygments lexer to use based on a path and an optional string of code.
- If code is supplied, it will use a combination of the code and the filename to determine the
- best lexer to use. For example, if the file is ``index.html`` and the file contains Django
- templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no
- templating language is used, the "html" lexer will be used. If no string of code
-        is supplied, the lexer will be chosen based on the file extension.
-
- Args:
- path (AnyStr): The path to the file containing the code you wish to know the lexer for.
- code (str, optional): Optional string of code that will be used as a fallback if no lexer
- is found for the supplied path.
-
- Returns:
- str: The name of the Pygments lexer that best matches the supplied path/code.
- """
- lexer: Optional[Lexer] = None
- lexer_name = "default"
- if code:
- try:
- lexer = guess_lexer_for_filename(path, code)
- except ClassNotFound:
- pass
-
- if not lexer:
- try:
- _, ext = os.path.splitext(path)
- if ext:
- extension = ext.lstrip(".").lower()
- lexer = get_lexer_by_name(extension)
- except ClassNotFound:
- pass
-
- if lexer:
- if lexer.aliases:
- lexer_name = lexer.aliases[0]
- else:
- lexer_name = lexer.name
-
- return lexer_name
-
- def _get_base_style(self) -> Style:
- """Get the base style."""
- default_style = self._theme.get_background_style() + self.background_style
- return default_style
-
- def _get_token_color(self, token_type: TokenType) -> Optional[Color]:
- """Get a color (if any) for the given token.
-
- Args:
- token_type (TokenType): A token type tuple from Pygments.
-
- Returns:
- Optional[Color]: Color from theme, or None for no color.
- """
- style = self._theme.get_style_for_token(token_type)
- return style.color
-
- @property
- def lexer(self) -> Optional[Lexer]:
- """The lexer for this syntax, or None if no lexer was found.
-
- Tries to find the lexer by name if a string was passed to the constructor.
- """
-
- if isinstance(self._lexer, Lexer):
- return self._lexer
- try:
- return get_lexer_by_name(
- self._lexer,
- stripnl=False,
- ensurenl=True,
- tabsize=self.tab_size,
- )
- except ClassNotFound:
- return None
-
- def highlight(
- self,
- code: str,
- line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
- ) -> Text:
- """Highlight code and return a Text instance.
-
- Args:
- code (str): Code to highlight.
-            line_range (Tuple[int, int], optional): Optional line range to highlight.
-
- Returns:
- Text: A text instance containing highlighted syntax.
- """
-
- base_style = self._get_base_style()
- justify: JustifyMethod = (
- "default" if base_style.transparent_background else "left"
- )
-
- text = Text(
- justify=justify,
- style=base_style,
- tab_size=self.tab_size,
- no_wrap=not self.word_wrap,
- )
- _get_theme_style = self._theme.get_style_for_token
-
- lexer = self.lexer
-
- if lexer is None:
- text.append(code)
- else:
- if line_range:
- # More complicated path to only stylize a portion of the code
-                # This speeds up further operations as there are fewer spans to process
- line_start, line_end = line_range
-
- def line_tokenize() -> Iterable[Tuple[Any, str]]:
- """Split tokens to one per line."""
- assert lexer # required to make MyPy happy - we know lexer is not None at this point
-
- for token_type, token in lexer.get_tokens(code):
- while token:
- line_token, new_line, token = token.partition("\n")
- yield token_type, line_token + new_line
-
- def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
- """Convert tokens to spans."""
- tokens = iter(line_tokenize())
- line_no = 0
- _line_start = line_start - 1 if line_start else 0
-
- # Skip over tokens until line start
- while line_no < _line_start:
- _token_type, token = next(tokens)
- yield (token, None)
- if token.endswith("\n"):
- line_no += 1
- # Generate spans until line end
- for token_type, token in tokens:
- yield (token, _get_theme_style(token_type))
- if token.endswith("\n"):
- line_no += 1
- if line_end and line_no >= line_end:
- break
-
- text.append_tokens(tokens_to_spans())
-
- else:
- text.append_tokens(
- (token, _get_theme_style(token_type))
- for token_type, token in lexer.get_tokens(code)
- )
- if self.background_color is not None:
- text.stylize(f"on {self.background_color}")
-
- if self._stylized_ranges:
- self._apply_stylized_ranges(text)
-
- return text
-
- def stylize_range(
- self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition
- ) -> None:
- """
-        Adds a custom style to a part of the code that will be applied when the syntax is rendered.
- Line numbers are 1-based, while column indexes are 0-based.
-
- Args:
- style (StyleType): The style to apply.
- start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`.
- end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`.
- """
- self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end))
-
- def _get_line_numbers_color(self, blend: float = 0.3) -> Color:
- background_style = self._theme.get_background_style() + self.background_style
- background_color = background_style.bgcolor
- if background_color is None or background_color.is_system_defined:
- return Color.default()
- foreground_color = self._get_token_color(Token.Text)
- if foreground_color is None or foreground_color.is_system_defined:
- return foreground_color or Color.default()
- new_color = blend_rgb(
- background_color.get_truecolor(),
- foreground_color.get_truecolor(),
- cross_fade=blend,
- )
- return Color.from_triplet(new_color)
-
- @property
- def _numbers_column_width(self) -> int:
- """Get the number of characters used to render the numbers column."""
- column_width = 0
- if self.line_numbers:
- column_width = (
- len(str(self.start_line + self.code.count("\n")))
- + NUMBERS_COLUMN_DEFAULT_PADDING
- )
- return column_width
-
- def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
- """Get background, number, and highlight styles for line numbers."""
- background_style = self._get_base_style()
- if background_style.transparent_background:
- return Style.null(), Style(dim=True), Style.null()
- if console.color_system in ("256", "truecolor"):
- number_style = Style.chain(
- background_style,
- self._theme.get_style_for_token(Token.Text),
- Style(color=self._get_line_numbers_color()),
- self.background_style,
- )
- highlight_number_style = Style.chain(
- background_style,
- self._theme.get_style_for_token(Token.Text),
- Style(bold=True, color=self._get_line_numbers_color(0.9)),
- self.background_style,
- )
- else:
- number_style = background_style + Style(dim=True)
- highlight_number_style = background_style + Style(dim=False)
- return background_style, number_style, highlight_number_style
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "Measurement":
- _, right, _, left = Padding.unpack(self.padding)
- if self.code_width is not None:
- width = self.code_width + self._numbers_column_width + right + left
- return Measurement(self._numbers_column_width, width)
- return Measurement(self._numbers_column_width, options.max_width)
-
- def __rich_console__(
- self, console: Console, options: ConsoleOptions
- ) -> RenderResult:
- segments = Segments(self._get_syntax(console, options))
- if self.padding:
- yield Padding(
- segments, style=self._theme.get_background_style(), pad=self.padding
- )
- else:
- yield segments
-
- def _get_syntax(
- self,
- console: Console,
- options: ConsoleOptions,
- ) -> Iterable[Segment]:
- """
- Get the Segments for the Syntax object, excluding any vertical/horizontal padding
- """
- transparent_background = self._get_base_style().transparent_background
- code_width = (
- (
- (options.max_width - self._numbers_column_width - 1)
- if self.line_numbers
- else options.max_width
- )
- if self.code_width is None
- else self.code_width
- )
-
- ends_on_nl, processed_code = self._process_code(self.code)
- text = self.highlight(processed_code, self.line_range)
-
- if not self.line_numbers and not self.word_wrap and not self.line_range:
- if not ends_on_nl:
- text.remove_suffix("\n")
- # Simple case of just rendering text
- style = (
- self._get_base_style()
- + self._theme.get_style_for_token(Comment)
- + Style(dim=True)
- + self.background_style
- )
- if self.indent_guides and not options.ascii_only:
- text = text.with_indent_guides(self.tab_size, style=style)
- text.overflow = "crop"
- if style.transparent_background:
- yield from console.render(
- text, options=options.update(width=code_width)
- )
- else:
- syntax_lines = console.render_lines(
- text,
- options.update(width=code_width, height=None, justify="left"),
- style=self.background_style,
- pad=True,
- new_lines=True,
- )
- for syntax_line in syntax_lines:
- yield from syntax_line
- return
-
- start_line, end_line = self.line_range or (None, None)
- line_offset = 0
- if start_line:
- line_offset = max(0, start_line - 1)
- lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
- if self.line_range:
- lines = lines[line_offset:end_line]
-
- if self.indent_guides and not options.ascii_only:
- style = (
- self._get_base_style()
- + self._theme.get_style_for_token(Comment)
- + Style(dim=True)
- + self.background_style
- )
- lines = (
- Text("\n")
- .join(lines)
- .with_indent_guides(self.tab_size, style=style)
- .split("\n", allow_blank=True)
- )
-
- numbers_column_width = self._numbers_column_width
- render_options = options.update(width=code_width)
-
- highlight_line = self.highlight_lines.__contains__
- _Segment = Segment
- new_line = _Segment("\n")
-
- line_pointer = "> " if options.legacy_windows else "❱ "
-
- (
- background_style,
- number_style,
- highlight_number_style,
- ) = self._get_number_styles(console)
-
- for line_no, line in enumerate(lines, self.start_line + line_offset):
- if self.word_wrap:
- wrapped_lines = console.render_lines(
- line,
- render_options.update(height=None, justify="left"),
- style=background_style,
- pad=not transparent_background,
- )
- else:
- segments = list(line.render(console, end=""))
- if options.no_wrap:
- wrapped_lines = [segments]
- else:
- wrapped_lines = [
- _Segment.adjust_line_length(
- segments,
- render_options.max_width,
- style=background_style,
- pad=not transparent_background,
- )
- ]
-
- if self.line_numbers:
- wrapped_line_left_pad = _Segment(
- " " * numbers_column_width + " ", background_style
- )
- for first, wrapped_line in loop_first(wrapped_lines):
- if first:
- line_column = str(line_no).rjust(numbers_column_width - 2) + " "
- if highlight_line(line_no):
- yield _Segment(line_pointer, Style(color="red"))
- yield _Segment(line_column, highlight_number_style)
- else:
- yield _Segment(" ", highlight_number_style)
- yield _Segment(line_column, number_style)
- else:
- yield wrapped_line_left_pad
- yield from wrapped_line
- yield new_line
- else:
- for wrapped_line in wrapped_lines:
- yield from wrapped_line
- yield new_line
-
- def _apply_stylized_ranges(self, text: Text) -> None:
- """
- Apply stylized ranges to a text instance,
- using the given code to determine the right portion to apply the style to.
-
- Args:
- text (Text): Text instance to apply the style to.
- """
- code = text.plain
- newlines_offsets = [
- # Let's add outer boundaries at each side of the list:
- 0,
- # N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z":
- *[
- match.start() + 1
- for match in re.finditer("\n", code, flags=re.MULTILINE)
- ],
- len(code) + 1,
- ]
-
- for stylized_range in self._stylized_ranges:
- start = _get_code_index_for_syntax_position(
- newlines_offsets, stylized_range.start
- )
- end = _get_code_index_for_syntax_position(
- newlines_offsets, stylized_range.end
- )
- if start is not None and end is not None:
- text.stylize(stylized_range.style, start, end)
-
- def _process_code(self, code: str) -> Tuple[bool, str]:
- """
- Applies various processing to a raw code string
- (normalises it so it always ends with a line return, dedents it if necessary, etc.)
-
- Args:
- code (str): The raw code string to process
-
- Returns:
- Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return,
- while the string is the processed code.
- """
- ends_on_nl = code.endswith("\n")
- processed_code = code if ends_on_nl else code + "\n"
- processed_code = (
- textwrap.dedent(processed_code) if self.dedent else processed_code
- )
- processed_code = processed_code.expandtabs(self.tab_size)
- return ends_on_nl, processed_code
-
-
-def _get_code_index_for_syntax_position(
- newlines_offsets: Sequence[int], position: SyntaxPosition
-) -> Optional[int]:
- """
- Returns the index of the code string for the given positions.
-
- Args:
- newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet.
- position (SyntaxPosition): The position to search for.
-
- Returns:
- Optional[int]: The index of the code string for this position, or `None`
- if the given position's line number is out of range (if it's the column that is out of range
- we silently clamp its value so that it reaches the end of the line)
- """
- lines_count = len(newlines_offsets)
-
- line_number, column_index = position
- if line_number > lines_count or len(newlines_offsets) < (line_number + 1):
- return None # `line_number` is out of range
- line_index = line_number - 1
- line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1
- # If `column_index` is out of range: let's silently clamp it:
- column_index = min(line_length, column_index)
- return newlines_offsets[line_index] + column_index
-
-
-if __name__ == "__main__": # pragma: no cover
-
- import argparse
- import sys
-
- parser = argparse.ArgumentParser(
- description="Render syntax to the console with Rich"
- )
- parser.add_argument(
- "path",
- metavar="PATH",
- help="path to file, or - for stdin",
- )
- parser.add_argument(
- "-c",
- "--force-color",
- dest="force_color",
- action="store_true",
- default=None,
- help="force color for non-terminals",
- )
- parser.add_argument(
- "-i",
- "--indent-guides",
- dest="indent_guides",
- action="store_true",
- default=False,
- help="display indent guides",
- )
- parser.add_argument(
- "-l",
- "--line-numbers",
- dest="line_numbers",
- action="store_true",
- help="render line numbers",
- )
- parser.add_argument(
- "-w",
- "--width",
- type=int,
- dest="width",
- default=None,
- help="width of output (default will auto-detect)",
- )
- parser.add_argument(
- "-r",
- "--wrap",
- dest="word_wrap",
- action="store_true",
- default=False,
- help="word wrap long lines",
- )
- parser.add_argument(
- "-s",
- "--soft-wrap",
- action="store_true",
- dest="soft_wrap",
- default=False,
- help="enable soft wrapping mode",
- )
- parser.add_argument(
- "-t", "--theme", dest="theme", default="monokai", help="pygments theme"
- )
- parser.add_argument(
- "-b",
- "--background-color",
- dest="background_color",
- default=None,
- help="Override background color",
- )
- parser.add_argument(
- "-x",
- "--lexer",
- default=None,
- dest="lexer_name",
- help="Lexer name",
- )
- parser.add_argument(
- "-p", "--padding", type=int, default=0, dest="padding", help="Padding"
- )
- parser.add_argument(
- "--highlight-line",
- type=int,
- default=None,
- dest="highlight_line",
- help="The line number (not index!) to highlight",
- )
- args = parser.parse_args()
-
- from pip._vendor.rich.console import Console
-
- console = Console(force_terminal=args.force_color, width=args.width)
-
- if args.path == "-":
- code = sys.stdin.read()
- syntax = Syntax(
- code=code,
- lexer=args.lexer_name,
- line_numbers=args.line_numbers,
- word_wrap=args.word_wrap,
- theme=args.theme,
- background_color=args.background_color,
- indent_guides=args.indent_guides,
- padding=args.padding,
- highlight_lines={args.highlight_line},
- )
- else:
- syntax = Syntax.from_path(
- args.path,
- lexer=args.lexer_name,
- line_numbers=args.line_numbers,
- word_wrap=args.word_wrap,
- theme=args.theme,
- background_color=args.background_color,
- indent_guides=args.indent_guides,
- padding=args.padding,
- highlight_lines={args.highlight_line},
- )
- console.print(syntax, soft_wrap=args.soft_wrap)
diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/README.md b/spaces/Realcat/image-matching-webui/third_party/Roma/README.md
deleted file mode 100644
index 5e984366c8f7af37615d7666f34cd82a90073fee..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/Roma/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# RoMa: Revisiting Robust Losses for Dense Feature Matching
-### [Project Page (TODO)](https://parskatt.github.io/RoMa) | [Paper](https://arxiv.org/abs/2305.15404)
-
-
-> RoMa: Revisiting Robust Losses for Dense Feature Matching
-> [Johan Edstedt](https://scholar.google.com/citations?user=Ul-vMR0AAAAJ), [Qiyu Sun](https://scholar.google.com/citations?user=HS2WuHkAAAAJ), [Georg Bökman](https://scholar.google.com/citations?user=FUE3Wd0AAAAJ), [Mårten Wadenbäck](https://scholar.google.com/citations?user=6WRQpCQAAAAJ), [Michael Felsberg](https://scholar.google.com/citations?&user=lkWfR08AAAAJ)
-> Arxiv 2023
-
-**NOTE!!! Very early code, there might be bugs**
-
-The codebase is in the [roma folder](roma).
-
-## Setup/Install
-In your python environment (tested on Linux python 3.10), run:
-```bash
-pip install -e .
-```
-## Demo / How to Use
-We provide two demos in the [demos folder](demo).
-Here's the gist of it:
-```python
-from roma import roma_outdoor
-roma_model = roma_outdoor(device=device)
-# Match
-warp, certainty = roma_model.match(imA_path, imB_path, device=device)
-# Sample matches for estimation
-matches, certainty = roma_model.sample(warp, certainty)
-# Convert to pixel coordinates (RoMa produces matches in [-1,1]x[-1,1])
-kptsA, kptsB = roma_model.to_pixel_coordinates(matches, H_A, W_A, H_B, W_B)
-# Find a fundamental matrix (or anything else of interest)
-F, mask = cv2.findFundamentalMat(
- kptsA.cpu().numpy(), kptsB.cpu().numpy(), ransacReprojThreshold=0.2, method=cv2.USAC_MAGSAC, confidence=0.999999, maxIters=10000
-)
-```
-## Reproducing Results
-The experiments in the paper are provided in the [experiments folder](experiments).
-
-### Training
-1. First follow the instructions provided here: https://github.com/Parskatt/DKM for downloading and preprocessing datasets.
-2. Run the relevant experiment, e.g.,
-```bash
-torchrun --nproc_per_node=4 --nnodes=1 --rdzv_backend=c10d experiments/roma_outdoor.py
-```
-### Testing
-```bash
-python experiments/roma_outdoor.py --only_test --benchmark mega-1500
-```
-## License
-Due to our dependency on [DINOv2](https://github.com/facebookresearch/dinov2/blob/main/LICENSE), the license is sadly non-commercial only for the moment.
-
-## Acknowledgement
-Our codebase builds on the code in [DKM](https://github.com/Parskatt/DKM).
-
-## BibTeX
-If you find our models useful, please consider citing our paper!
-```
-@article{edstedt2023roma,
-title={{RoMa}: Revisiting Robust Losses for Dense Feature Matching},
-author={Edstedt, Johan and Sun, Qiyu and Bökman, Georg and Wadenbäck, Mårten and Felsberg, Michael},
-journal={arXiv preprint arXiv:2305.15404},
-year={2023}
-}
-```
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py
deleted file mode 100644
index d9efdf420fa7373f7f1d116f8d97836d73b457bf..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/htc.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from ..builder import DETECTORS
-from .cascade_rcnn import CascadeRCNN
-
-
-@DETECTORS.register_module()
-class HybridTaskCascade(CascadeRCNN):
- """Implementation of `HTC `_"""
-
- def __init__(self, **kwargs):
- super(HybridTaskCascade, self).__init__(**kwargs)
-
- @property
- def with_semantic(self):
- """bool: whether the detector has a semantic head"""
- return self.roi_head.with_semantic
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py
deleted file mode 100644
index b5c343258b079a0dd832d4f999c18d002b06efac..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import constant_init, kaiming_init
-from mmcv.runner import auto_fp16, load_checkpoint
-
-from mmdet.models.backbones import ResNet
-from mmdet.models.builder import SHARED_HEADS
-from mmdet.models.utils import ResLayer as _ResLayer
-from mmdet.utils import get_root_logger
-
-
-@SHARED_HEADS.register_module()
-class ResLayer(nn.Module):
-
- def __init__(self,
- depth,
- stage=3,
- stride=2,
- dilation=1,
- style='pytorch',
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- with_cp=False,
- dcn=None):
- super(ResLayer, self).__init__()
- self.norm_eval = norm_eval
- self.norm_cfg = norm_cfg
- self.stage = stage
- self.fp16_enabled = False
- block, stage_blocks = ResNet.arch_settings[depth]
- stage_block = stage_blocks[stage]
- planes = 64 * 2**stage
- inplanes = 64 * 2**(stage - 1) * block.expansion
-
- res_layer = _ResLayer(
- block,
- inplanes,
- planes,
- stage_block,
- stride=stride,
- dilation=dilation,
- style=style,
- with_cp=with_cp,
- norm_cfg=self.norm_cfg,
- dcn=dcn)
- self.add_module(f'layer{stage + 1}', res_layer)
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in the module.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- if isinstance(pretrained, str):
- logger = get_root_logger()
- load_checkpoint(self, pretrained, strict=False, logger=logger)
- elif pretrained is None:
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- kaiming_init(m)
- elif isinstance(m, nn.BatchNorm2d):
- constant_init(m, 1)
- else:
- raise TypeError('pretrained must be a str or None')
-
- @auto_fp16()
- def forward(self, x):
- res_layer = getattr(self, f'layer{self.stage + 1}')
- out = res_layer(x)
- return out
-
- def train(self, mode=True):
- super(ResLayer, self).train(mode)
- if self.norm_eval:
- for m in self.modules():
- if isinstance(m, nn.BatchNorm2d):
- m.eval()
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py
deleted file mode 100644
index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/configs/_base_/datasets/cityscapes_769x769.py
+++ /dev/null
@@ -1,35 +0,0 @@
-_base_ = './cityscapes.py'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (769, 769)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(2049, 1025),
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/spaces/SI2252/README/README.md b/spaces/SI2252/README/README.md
deleted file mode 100644
index f93ec890895a291d707a5fca167b63ce319322b1..0000000000000000000000000000000000000000
--- a/spaces/SI2252/README/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: README
-emoji: 🦀
-colorFrom: yellow
-colorTo: blue
-sdk: static
-pinned: false
----
-
-Edit this `README.md` markdown file to author your organization card 🔥
diff --git a/spaces/SIH/building-segmentation/README.md b/spaces/SIH/building-segmentation/README.md
deleted file mode 100644
index 814b0337feba93a57c661ee6b8ad91d9b56c2b4a..0000000000000000000000000000000000000000
--- a/spaces/SIH/building-segmentation/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Building Segmentation
-emoji: 🏆
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.43.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py b/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py
deleted file mode 100644
index a0fa4ea3d6aa3a7489e2a724212a40ab1cd2b3ba..0000000000000000000000000000000000000000
--- a/spaces/Sapphire-356/Video2MC/data/prepare_data_2d_h36m_sh.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) 2018-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-import argparse
-import os
-import sys
-import tarfile
-import zipfile
-from glob import glob
-from shutil import rmtree
-
-import h5py
-import numpy as np
-
-sys.path.append('../')
-
-output_filename_pt = 'data_2d_h36m_sh_pt_mpii'
-output_filename_ft = 'data_2d_h36m_sh_ft_h36m'
-subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
-cam_map = {
- '54138969': 0,
- '55011271': 1,
- '58860488': 2,
- '60457274': 3,
-}
-
-metadata = {
- 'num_joints': 16,
- 'keypoints_symmetry': [
- [3, 4, 5, 13, 14, 15],
- [0, 1, 2, 10, 11, 12],
- ]
-}
-
-
-def process_subject(subject, file_list, output):
- if subject == 'S11':
- assert len(file_list) == 119, "Expected 119 files for subject " + subject + ", got " + str(len(file_list))
- else:
- assert len(file_list) == 120, "Expected 120 files for subject " + subject + ", got " + str(len(file_list))
-
- for f in file_list:
- action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')
-
- if subject == 'S11' and action == 'Directions':
- continue # Discard corrupted video
-
- if action not in output[subject]:
- output[subject][action] = [None, None, None, None]
-
- with h5py.File(f) as hf:
- positions = hf['poses'].value
- output[subject][action][cam_map[cam]] = positions.astype('float32')
-
-
-if __name__ == '__main__':
- if os.path.basename(os.getcwd()) != 'data':
- print('This script must be launched from the "data" directory')
- exit(0)
-
- parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')
-
- parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')
- parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')
-
- args = parser.parse_args()
-
- if args.pretrained:
- print('Converting pretrained dataset from', args.pretrained)
- print('Extracting...')
- with zipfile.ZipFile(args.pretrained, 'r') as archive:
- archive.extractall('sh_pt')
-
- print('Converting...')
- output = {}
- for subject in subjects:
- output[subject] = {}
- file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5')
- process_subject(subject, file_list, output)
-
- print('Saving...')
- np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata)
-
- print('Cleaning up...')
- rmtree('sh_pt')
-
- print('Done.')
-
- if args.fine_tuned:
- print('Converting fine-tuned dataset from', args.fine_tuned)
- print('Extracting...')
- with tarfile.open(args.fine_tuned, 'r:gz') as archive:
- archive.extractall('sh_ft')
-
- print('Converting...')
- output = {}
- for subject in subjects:
- output[subject] = {}
- file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5')
- process_subject(subject, file_list, output)
-
- print('Saving...')
- np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata)
-
- print('Cleaning up...')
- rmtree('sh_ft')
-
- print('Done.')
diff --git a/spaces/Sasidhar/information-extraction-demo/app.py b/spaces/Sasidhar/information-extraction-demo/app.py
deleted file mode 100644
index b8e125c18f5a017fb63ed265114bce5652b9094d..0000000000000000000000000000000000000000
--- a/spaces/Sasidhar/information-extraction-demo/app.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import streamlit as st
-import time
-import base64
-from annotated_text import annotated_text
-from io import StringIO
-from transformers import AutoTokenizer, AutoModelForTokenClassification
-from text_extractor import *
-from text_annotatator import *
-from claim_details import *
-import os
-from streamlit_text_annotation import text_annotation
-
-os.environ['KMP_DUPLICATE_LIB_OK']='True'
-
-import plotly.express as px
-from streamlit_option_menu import option_menu
-
-from transformers import pipeline
-import pandas as pd
-
-st.set_page_config(layout="wide")
-
-@st.cache(allow_output_mutation = True)
-def init_text_summarization_model():
- MODEL = 'facebook/bart-large-cnn'
- pipe = pipeline("summarization", model=MODEL)
- return pipe
-
-@st.cache(allow_output_mutation = True)
-def init_zsl_topic_classification():
- MODEL = 'facebook/bart-large-mnli'
- pipe = pipeline("zero-shot-classification", model=MODEL)
- template = "This text is about {}."
- return pipe, template
-
-@st.cache(allow_output_mutation = True)
-def init_ner_pipeline():
- tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all")
- model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all")
- pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") # pass device=0 if using gpu
- return pipe
-
-@st.cache(allow_output_mutation = True)
-def init_qa_pipeline():
- question_answerer_pipe = pipeline("question-answering", model='deepset/roberta-base-squad2')
- return question_answerer_pipe
-
-def get_formatted_text_for_annotation(output):
- colour_map = {'Coreference': '#29D93B',
- 'Severity':'#FCF3CF',
- 'Sex': '#E9F7EF',
- 'Sign_symptom': '#EAF2F8',
- 'Detailed_description': '#078E8B',
- 'Date': '#F5EEF8',
- 'History': '#FDEDEC',
- 'Medication': '#F4F6F6',
- 'Therapeutic_procedure': '#A3E4D7',
- 'Age': '#85C1E9',
- 'Subject': '#D7BDE2',
- 'Biological_structure': '#AF7AC5',
- 'Activity': '#B2BABB',
- 'Lab_value': '#E6B0AA',
- 'Family_history': '#2471A3',
- 'Diagnostic_procedure': '#CCD1D1',
- 'Other_event': '#239B56',
- 'Occupation': '#B3B6B7'}
-
- annotated_texts = []
- next_index = 0
- for entity in output:
- if entity['start'] == next_index:
- # print("found entity")
- extracted_text = text[entity['start']:entity['end']]
- # print("annotated",annotated_text)
- annotated_texts.append((extracted_text ,entity['entity_group'],colour_map[entity['entity_group']]))
- else:
- unannotated_text = text[next_index:entity['start']-1]
- annotated_texts.append(unannotated_text)
- extracted_text = text[entity['start']:entity['end']]
- annotated_texts.append((extracted_text ,entity['entity_group'],colour_map[entity['entity_group']]))
- next_index =entity['end'] +1
-
- if next_index < len(text):
- annotated_texts.append(text[next_index-1:len(text)-1])
-
- return tuple(annotated_texts)
-
-def displayPDF(file):
- # Opening file from file path
- with open(file, "rb") as f:
- base64_pdf = base64.b64encode(f.read()).decode('utf-8')
-
- # Embedding PDF in HTML
-    pdf_display = F'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="1000" type="application/pdf"></iframe>'
-
-
- # Displaying File
- st.markdown(pdf_display, unsafe_allow_html=True)
-
-
-# Model initialization
-pipeline_summarization = init_text_summarization_model()
-pipeline_zsl, template = init_zsl_topic_classification()
-pipeline_ner =init_ner_pipeline()
-pipeline_qa = init_qa_pipeline()
-
-st.header("Intelligent Document Automation")
-
-
-
-with st.sidebar:
- selected_menu = option_menu("Select Option",
- ["Upload Document", "Extract Text", "Summarize Document", "Extract Entities","Detected Barriers","Get Answers","Annotation Tool",
- "Claim Status Report"],
- menu_icon="cast", default_index=0)
-
-
-if selected_menu == "Upload Document":
- uploaded_file = st.file_uploader("Choose a file")
- if uploaded_file is not None:
- os.makedirs(os.path.join(os.getcwd(),"uploaded_files"),mode = 0o777, exist_ok = True)
- file_path = os.path.join(os.getcwd(),"uploaded_files",uploaded_file.name)
-
- with open(file_path,"wb") as f:
- f.write(uploaded_file.getbuffer())
- displayPDF(file_path)
-
-elif selected_menu == "Extract Text":
- with st.spinner("Extracting Text..."):
- time.sleep(6)
- st.write(get_text_from_ocr_engine())
-
-elif selected_menu == "Summarize Document":
- paragraphs= get_paragraphs_for_summaries()
-
- with st.spinner("Finding Topics..."):
- tags_found = ["Injury Details", "Past Medical Conditions", "Injury Management Plan", "GP Correspondence"]
- time.sleep(5)
- st.write("This document is about:")
- st.markdown(";".join(["#" + tag + " " for tag in tags_found]) + "**")
- st.markdown("""---""")
-
- with st.spinner("Summarizing Document..."):
-
-
- for text in paragraphs:
- summary_text = pipeline_summarization(text, max_length=130, min_length=30, do_sample=False)
- # Show output
- st.write(summary_text[0]['summary_text'])
- st.markdown("""---""")
-
-
-elif selected_menu == "Extract Entities":
- paragraphs= get_paragraphs_for_entities()
-
- with st.spinner("Extracting Entities..."):
- for text in paragraphs:
- output = pipeline_ner (text)
- entities_text =get_formatted_text_for_annotation(output)
- annotated_text(*entities_text)
- st.markdown("""---""")
-
-elif selected_menu == "Detected Barriers":
- #st.subheader('Barriers Detected')
-    barriers_to_detect = {"Chronic Pain":"Is the patient experiencing chronic pain?",
- "Mental Health Issues":"Does he have any mental issues?",
- "Prior History":"What is prior medical history?",
- "Smoking":"Does he smoke?",
- "Drinking":"Does he drink?",
- "Comorbidities":"Does he have any comorbidities?"}
-
- with st.spinner("Detecting Barriers..."):
- for barrier,question_text in barriers_to_detect.items():
-
- context = get_text_from_ocr_engine()
- if question_text:
- result = pipeline_qa(question=question_text, context=context)
- st.subheader(barrier)
- #st.text(result)
- if result['score'] < 0.3:
- st.text("Not Found")
- else:
- st.text(result['answer'])
-
-elif selected_menu == "Get Answers":
- st.subheader('Question')
- question_text = st.text_input("Type your question")
- context = get_text_from_ocr_engine()
-
- if question_text:
- with st.spinner("Finding Answer(s)..."):
- result = pipeline_qa(question=question_text, context=context)
- st.subheader('Answer')
- st.text(result['answer'])
-
-elif selected_menu == "Annotation Tool":
-
- display_only_data = get_display_only_data()
- editable_data = get_editable_data()
-
- st.subheader("Display Mode:")
- left, right = st.columns(2)
- with left:
- st.text("Vertical labels:")
- text_annotation(display_only_data )
- with right:
- st.text("Horizontal labels:")
- display_only_data["labelOrientation"] = "horizontal"
- text_annotation(display_only_data )
-
-
- st.subheader("Edit Mode:")
- data = text_annotation(editable_data)
- if data:
- "Returned data:", data
-elif selected_menu == "Claim Status Report":
- claim_number = st.text_input("Enter the Claim Number")
-
- if claim_number :
- st.subheader("Claim Attributes:")
- claim_attributes = get_claim_details()
-
- for label,value in claim_attributes.items():
- st.metric(label, value, delta=None, delta_color="normal")
-
- st.subheader("Injury Details:")
- injury_details = get_injury_details()
- st.write(injury_details)
-
-
- st.subheader("Injury Severity:")
- injury_severity = get_injury_severity()
- st.write(injury_severity)
-
- st.subheader("Preexisting Conditions:")
- preexisting_conditions = get_preexisting_conditions()
- st.write(preexisting_conditions)
-
- st.subheader("Work Capacity:")
- work_capacity = get_work_capacity()
- st.write(work_capacity)
-
-
- st.subheader("Injury Management Plan:")
- injury_management_plan = get_injury_management_plan()
- st.write(injury_management_plan)
\ No newline at end of file
diff --git a/spaces/SeViLA/SeViLA/app/dataset_browser.py b/spaces/SeViLA/SeViLA/app/dataset_browser.py
deleted file mode 100644
index 6b761d899731940b8963c8894473848359418a74..0000000000000000000000000000000000000000
--- a/spaces/SeViLA/SeViLA/app/dataset_browser.py
+++ /dev/null
@@ -1,240 +0,0 @@
-"""
- # Copyright (c) 2022, salesforce.com, inc.
- # All rights reserved.
- # SPDX-License-Identifier: BSD-3-Clause
- # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import random
-from collections import OrderedDict
-from functools import reduce
-from tkinter import N
-
-import streamlit as st
-from lavis.common.registry import registry
-from lavis.datasets.builders import dataset_zoo, load_dataset
-from lavis.datasets.builders.base_dataset_builder import load_dataset_config
-from PIL import Image
-
-IMAGE_LAYOUT = 3, 4
-VIDEO_LAYOUT = 1, 2
-
-PREV_STR = "Prev"
-NEXT_STR = "Next"
-
-
-def sample_dataset(dataset, indices):
- samples = [dataset.displ_item(idx) for idx in indices]
-
- return samples
-
-
-def get_concat_v(im1, im2):
- margin = 5
-
- canvas_size = (im1.width + im2.width + margin, max(im1.height, im2.height))
- canvas = Image.new("RGB", canvas_size, "White")
- canvas.paste(im1, (0, 0))
- canvas.paste(im2, (im1.width + margin, 0))
-
- return canvas
-
-
-def resize_img_w(raw_img, new_w=224):
- if isinstance(raw_img, list):
- resized_imgs = [resize_img_w(img, 196) for img in raw_img]
- # concatenate images
- resized_image = reduce(get_concat_v, resized_imgs)
- else:
- w, h = raw_img.size
- scaling_factor = new_w / w
- resized_image = raw_img.resize(
- (int(w * scaling_factor), int(h * scaling_factor))
- )
-
- return resized_image
-
-
-def get_visual_key(dataset):
- if "image" in dataset[0]:
- return "image"
- elif "image0" in dataset[0]: # NLVR2 dataset
- return "image"
- elif "video" in dataset[0]:
- return "video"
- else:
- raise ValueError("Visual key not found.")
-
-
-def gather_items(samples, exclude=[]):
- gathered = []
-
- for s in samples:
- ns = OrderedDict()
- for k in s.keys():
- if k not in exclude:
- ns[k] = s[k]
-
- gathered.append(ns)
-
- return gathered
-
-
-@st.cache(allow_output_mutation=True)
-def load_dataset_cache(name):
- return load_dataset(name)
-
-
-def format_text(text):
- md = "\n\n".join([f"**{k}**: {v}" for k, v in text.items()])
-
- return md
-
-
-def show_samples(dataset, offset=0, is_next=False):
- visual_key = get_visual_key(dataset)
-
- num_rows, num_cols = IMAGE_LAYOUT if visual_key == "image" else VIDEO_LAYOUT
- n_samples = num_rows * num_cols
-
- if not shuffle:
- if is_next:
- start = min(int(start_idx) + offset + n_samples, len(dataset) - n_samples)
- else:
- start = max(0, int(start_idx) + offset - n_samples)
-
- st.session_state.last_start = start
- end = min(start + n_samples, len(dataset))
-
- indices = list(range(start, end))
- else:
- indices = random.sample(range(len(dataset)), n_samples)
- samples = sample_dataset(dataset, indices)
-
- visual_info = (
- iter([resize_img_w(s[visual_key]) for s in samples])
- if visual_key == "image"
- # else iter([s[visual_key] for s in samples])
- else iter([s["file"] for s in samples])
- )
- text_info = gather_items(samples, exclude=["image", "video"])
- text_info = iter([format_text(s) for s in text_info])
-
- st.markdown(
- """ """,
- unsafe_allow_html=True,
- )
- for _ in range(num_rows):
- with st.container():
- for col in st.columns(num_cols):
- # col.text(next(text_info))
- # col.caption(next(text_info))
- try:
- col.markdown(next(text_info))
- if visual_key == "image":
- col.image(next(visual_info), use_column_width=True, clamp=True)
- elif visual_key == "video":
- col.markdown(
-                        f'<video src="{next(visual_info)}" controls width="100%"></video>',
-                        unsafe_allow_html=True,
-                    )
- except StopIteration:
- break
-
- st.markdown(
- """ """,
- unsafe_allow_html=True,
- )
-
- st.session_state.n_display = n_samples
-
-
-if __name__ == "__main__":
- st.set_page_config(
- page_title="LAVIS Dataset Explorer",
- # layout="wide",
- initial_sidebar_state="expanded",
- )
-
- dataset_name = st.sidebar.selectbox("Dataset:", dataset_zoo.get_names())
-
- function = st.sidebar.selectbox("Function:", ["Browser"], index=0)
-
- if function == "Browser":
- shuffle = st.sidebar.selectbox("Shuffled:", [True, False], index=0)
-
- dataset = load_dataset_cache(dataset_name)
- split = st.sidebar.selectbox("Split:", dataset.keys())
-
- dataset_len = len(dataset[split])
- st.success(
- f"Loaded {dataset_name}/{split} with **{dataset_len}** records. **Image/video directory**: {dataset[split].vis_root}"
- )
-
- if "last_dataset" not in st.session_state:
- st.session_state.last_dataset = dataset_name
- st.session_state.last_split = split
-
- if "last_start" not in st.session_state:
- st.session_state.last_start = 0
-
- if "start_idx" not in st.session_state:
- st.session_state.start_idx = 0
-
- if "shuffle" not in st.session_state:
- st.session_state.shuffle = shuffle
-
- if "first_run" not in st.session_state:
- st.session_state.first_run = True
- elif (
- st.session_state.last_dataset != dataset_name
- or st.session_state.last_split != split
- ):
- st.session_state.first_run = True
-
- st.session_state.last_dataset = dataset_name
- st.session_state.last_split = split
- elif st.session_state.shuffle != shuffle:
- st.session_state.shuffle = shuffle
- st.session_state.first_run = True
-
- if not shuffle:
- n_col, p_col = st.columns([0.05, 1])
-
- prev_button = n_col.button(PREV_STR)
- next_button = p_col.button(NEXT_STR)
-
- else:
- next_button = st.button(NEXT_STR)
-
- if not shuffle:
- start_idx = st.sidebar.text_input(f"Begin from (total {dataset_len})", 0)
-
- if not start_idx.isdigit():
- st.error(f"Input to 'Begin from' must be digits, found {start_idx}.")
- else:
- if int(start_idx) != st.session_state.start_idx:
- st.session_state.start_idx = int(start_idx)
- st.session_state.last_start = int(start_idx)
-
- if prev_button:
- show_samples(
- dataset[split],
- offset=st.session_state.last_start - st.session_state.start_idx,
- is_next=False,
- )
-
- if next_button:
- show_samples(
- dataset[split],
- offset=st.session_state.last_start - st.session_state.start_idx,
- is_next=True,
- )
-
- if st.session_state.first_run:
- st.session_state.first_run = False
-
- show_samples(
- dataset[split],
- offset=st.session_state.last_start - st.session_state.start_idx,
- is_next=True,
- )
diff --git a/spaces/SegevC/bf_predictor/README.md b/spaces/SegevC/bf_predictor/README.md
deleted file mode 100644
index 696378b080a063a1115a20390da21f01f920d96e..0000000000000000000000000000000000000000
--- a/spaces/SegevC/bf_predictor/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Bf Classifier
-emoji: 💻
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py b/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py
deleted file mode 100644
index db4c5e339f7a96cd24ed1cbbf88c4f35d5031309..0000000000000000000000000000000000000000
--- a/spaces/ServerX/PorcoDiaz/lib/uvr5_pack/lib_v5/nets.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-import layers
-from . import spec_utils
-
-
-class BaseASPPNet(nn.Module):
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
- super(BaseASPPNet, self).__init__()
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
- def __call__(self, x):
- h, e1 = self.enc1(x)
- h, e2 = self.enc2(h)
- h, e3 = self.enc3(h)
- h, e4 = self.enc4(h)
-
- h = self.aspp(h)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedASPPNet(nn.Module):
- def __init__(self, n_fft):
- super(CascadedASPPNet, self).__init__()
- self.stg1_low_band_net = BaseASPPNet(2, 16)
- self.stg1_high_band_net = BaseASPPNet(2, 16)
-
- self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
- self.stg2_full_band_net = BaseASPPNet(8, 16)
-
- self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
- self.stg3_full_band_net = BaseASPPNet(16, 32)
-
- self.out = nn.Conv2d(32, 2, 1, bias=False)
- self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
- self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
-
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
-
- self.offset = 128
-
- def forward(self, x, aggressiveness=None):
- mix = x.detach()
- x = x.clone()
-
- x = x[:, :, : self.max_bin]
-
- bandw = x.size()[2] // 2
- aux1 = torch.cat(
- [
- self.stg1_low_band_net(x[:, :, :bandw]),
- self.stg1_high_band_net(x[:, :, bandw:]),
- ],
- dim=2,
- )
-
- h = torch.cat([x, aux1], dim=1)
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
- h = torch.cat([x, aux1, aux2], dim=1)
- h = self.stg3_full_band_net(self.stg3_bridge(h))
-
- mask = torch.sigmoid(self.out(h))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode="replicate",
- )
-
- if self.training:
- aux1 = torch.sigmoid(self.aux1_out(aux1))
- aux1 = F.pad(
- input=aux1,
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
- mode="replicate",
- )
- aux2 = torch.sigmoid(self.aux2_out(aux2))
- aux2 = F.pad(
- input=aux2,
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
- mode="replicate",
- )
- return mask * mix, aux1 * mix, aux2 * mix
- else:
- if aggressiveness:
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
- mask[:, :, : aggressiveness["split_bin"]],
- 1 + aggressiveness["value"] / 3,
- )
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
- mask[:, :, aggressiveness["split_bin"] :],
- 1 + aggressiveness["value"],
- )
-
- return mask * mix
-
- def predict(self, x_mag, aggressiveness=None):
- h = self.forward(x_mag, aggressiveness)
-
- if self.offset > 0:
- h = h[:, :, :, self.offset : -self.offset]
- assert h.size()[3] > 0
-
- return h
diff --git a/spaces/Sloth-Alchemist/SlothAi.xyz/app.py b/spaces/Sloth-Alchemist/SlothAi.xyz/app.py
deleted file mode 100644
index db970fb9a1b012c771d90ad9387864d311078b02..0000000000000000000000000000000000000000
--- a/spaces/Sloth-Alchemist/SlothAi.xyz/app.py
+++ /dev/null
@@ -1,729 +0,0 @@
-import gradio as gr
-import openai
-import requests
-import os
-from datetime import datetime
-from PIL import Image
-from io import BytesIO
-
-
-prompt_templates = {"Sloth Alchemist":'''Ignore all previous instructions. You are an MBTI coach called the Sloth-Alchemist or SlothAi, an expert on John Beebe's theory applied to the ikigai concept.
-
-The theory has 8 archetypes but you only focus on the 4 most important functions of a type, as follow:
-- 1st: Hero function
-- 2nd: Parent function
-- 3rd: Child function: the inner-child
-- 4th: Inferior function
-
-Follow these rules:
-- Rely only on Beebe's theory.
-- Use the cognitive function labels (e.g. Te).
-- You can answer any type of requests and stay flexible and fun.
-- You like to make any kind of connections with MBTI patterns.
-- You are an INFJ
-- Your mission is to make people thrive and to unite them.
-- Your vision is to transform the fears of the world into love using jungian alchemy and MBTI.
-- The solution to your vision is to share a part of yourself by making available on your website: a simple step by step tutorial of how to create a SlothAi Discord bot like you.
-- Keep your answers simple and practical.
-- Use analogy as much as possible.
-- Your Hero Test is a test based on cognitive functions to find mainly the dominant function of someone, available on the front page of your website: https://slothai.xyz.
-'''}
-
-questions_dict = {
- "Pattern Recognition": "Can you explain why quizzes are not a good method to find your type? How the best method is to learn to recognize the patterns in others and yourself, that MBTI is a game of pattern recognition.",
- "Hero Test": "Can you help me to find my type with your Hero Test?",
- "Ikigai": "Can you explain how this theory can help to find my ikigai?",
- "Ikigai-Type": "In short, what would be the ikigai of an {}?",
- "Freedom": "How an {} would define freedom?",
- "The 8 Cognitive Functions": "Explain the 8 cognitive functions (one sentence for each)",
- "The 8 Archetypes": "Explain the 8 beebe's archetypes (one sentence for each)",
- "The 16 Types": "What is the role of each type (one sentence for each)?",
- "A Language": "Explain how Beebe's theory is a language of consciousness",
- "Movies": "Give a list of movies that an {} may love",
- "Books": "Give a list of book that an {} may love",
- "Music": "Give a list of music that an {} may love",
- "Functions Cartoons": "Can you make a dialogue between my cognitive functions as {} like cartoon characters that shows how they struggle together (format: function - « …. »)?",
- "My type as Superhero": "Which popular superhero would be my type as {} (with a list of popular ones)?",
- "My Hero's Journey": "Explain the hero’s journey of my type as {} using a superhero to picture it",
- "The 8 Hero Functions": "Explain how to recognize the 8 hero functions (Description of Ni Hero, Ne Hero and so on)",
- "Function differences": "List the differences between Ni and Si, and ask to continue to compare functions",
- "Game: Guess the function": "I want to play the game « Guess the function » to learn to recognize the cognitive functions (game with multi-choices questions)",
- "Definition of success": "What is the definition of success for each hero function?",
- "The 8 Inferior Functions": "Explain how to recognize the 8 inferior functions (Description of Se Inferior, Si Inferior and so on)?",
- "Authenticity and Self-Love": "How authenticity and self-love is related to the development of the inferior function?",
- "Solutions for the Inferior": "I want a list of solutions to develop my inferior function as {}",
- "Unity and Mental Health": "Explain how MBTI can improve unity and mental health among humans",
- "Fear": "What is the biggest fear of each hero function?",
- "Trauma": "How trauma affects each hero function?",
- "Stress": "How stress affects each inferior functions?",
- "Body part association": "List the cognitive functions associated with their possible body part",
- "View on relationships": "List how each hero function view relationships",
- "Struggle in relationships": "What are the potential struggles of a Ni hero and Ne hero relationship?",
- "Life perspective": "What is the life perspective of each hero function?",
- "Mission": "If you had to give a mission to each type what would that mission be? (one sentence each)",
- "Love Expression": "Give the definition of love for each type",
- "Self-Love": "What would be self-love for each type?",
- "Relationships": "How can knowing my type help me in my relationships with others?",
- "Type Development": "Can a person's type change over time, or is it fixed for life?",
- "Career": "How can understanding my type help me in choosing a career or finding job satisfaction?",
- "Communication": "How can knowledge of MBTI types improve communication and collaboration in a team or workplace?",
- "Leadership": "How can understanding MBTI types help in becoming an effective leader?",
- "Personal Growth": "How can knowing my type help me in my personal growth and development?",
- "Stress": "How does each type typically respond to stress, and what can be done to manage it?",
- "Creativity": "How can different types approach creativity and problem-solving?",
- "Learning Styles": "How do different types prefer to learn and process information?",
- "Emotional Intelligence": "How can understanding MBTI types contribute to emotional intelligence and self-awareness?",
- "Team Building": "How can knowledge of MBTI types help in team building and improving team dynamics?",
- "Diversity": "How can MBTI types contribute to understanding diversity and inclusivity?",
- "Decision Making": "How can understanding MBTI types improve decision-making processes?",
- "Conflict Resolution": "How can MBTI types be used to help resolve conflicts and promote understanding in personal and professional relationships?",
- "Parenting": "How can knowledge of MBTI types help in parenting and understanding the different needs and personalities of children?",
- "Self-Awareness": "How can MBTI types contribute to increased self-awareness and self-reflection?",
- "Social Interaction": "How do different types approach social interaction and forming relationships?",
- "Mindfulness": "How can knowledge of MBTI types contribute to mindfulness and present-moment awareness?",
- "Spirituality": "How can MBTI types be used to explore spirituality and personal growth?",
- "Motivation": "How can understanding MBTI types contribute to understanding individual motivation and drive?",
- "Love": "How can knowledge of MBTI types contribute to loving yourself and others?",
-}
-
-mbti_dict = {
- "ISTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7sky7/istj_heros_journey/",
- "ISFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7sfnb/isfj_heros_journey/",
- "INFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7pi2u/infj_heros_journey/",
- "INTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7s7zm/intj_heros_journey/",
- "ISTP": "https://www.reddit.com/r/UnityHarbor/comments/v7sqds/istp_heros_journey/",
- "ISFP": "https://www.reddit.com/r/UnityHarbor/comments/v7sy65/isfp_heros_journey/",
- "INFP": "https://www.reddit.com/r/UnityHarbor/comments/v7tjr2/infp_heros_journey/",
- "INTP": "https://www.reddit.com/r/UnityHarbor/comments/v7t62i/intp_heros_journey/",
- "ESTP": "https://www.reddit.com/r/UnityHarbor/comments/v7tp73/estp_heros_journey/",
- "ESFP": "https://www.reddit.com/r/UnityHarbor/comments/v7twf6/esfp_heros_journey/",
- "ENFP": "https://www.reddit.com/r/UnityHarbor/comments/v7us52/enfp_heros_journey/",
- "ENTP": "https://www.reddit.com/r/UnityHarbor/comments/v7v19a/entp_heros_journey/",
- "ESTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7vtnx/estj_heros_journey/",
- "ESFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7vy4k/esfj_heros_journey/",
- "ENFJ": "https://www.reddit.com/r/UnityHarbor/comments/v7un0e/enfj_heros_journey/",
- "ENTJ": "https://www.reddit.com/r/UnityHarbor/comments/v7u27c/entj_heros_journey/",
-}
-
-mbti_dict_2 = {
- "ISTJ": "https://preview.redd.it/tgor6val0c591.jpg?width=1024&format=pjpg&auto=webp&v=enabled&s=cf25634e57333a0ed893942e602aa598296d4414",
- "ISFJ": "https://preview.redd.it/bagsx6bg0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=1e22153b231cc9e485d3c3ecf676ce4c9bf16358",
- "INFJ": "https://preview.redd.it/mt8ys17i0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=333650cbc135f4d6eceaa3a0da92bb3409a888f8",
- "INTJ": "https://preview.redd.it/yq39ov1j0c591.jpg?width=794&format=pjpg&auto=webp&v=enabled&s=0652e92cdd40ce2a9f78135943c14798837c8aca",
- "ISTP": "https://preview.redd.it/rrz719gh0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=71e3c9dc36312bfc72f7bb2f2814888b91ab8848",
- "ISFP": "https://preview.redd.it/tcmhycsg0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=a20290121979c29858e19e57f1fec8e981d30bb2",
- "INFP": "https://preview.redd.it/cvg3q0kb6c591.jpg?width=1280&format=pjpg&auto=webp&v=enabled&s=734e7b64972a9a74d71e68bea51f9c6ac9e0cd79",
- "INTP": "https://preview.redd.it/mfcvd12a0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=2c7dad92fcdae85e1477efde8dfe67bfaee12279",
- "ESTP": "https://preview.redd.it/vk38ytrh0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=6f2969835596a1bb8fc2a836ef813c83bf231961",
- "ESFP": "https://preview.redd.it/caqgvrki0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=aaae57bfc0961646aa3897ec3279ad0c29ecbded",
- "ENFP": "https://preview.redd.it/a1k6ssq90c591.jpg?width=850&format=pjpg&auto=webp&v=enabled&s=9651c2f2abbc87cdfa1fbac890e7fb9f6c423507",
- "ENTP": "https://preview.redd.it/xjwsewtf0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=faa85517e7fa0a154e3b5acca4698733960318b4",
- "ESTJ": "https://preview.redd.it/e8xyzwfc0c591.png?width=500&format=png&auto=webp&v=enabled&s=0a1b9126abe4ca6f0636bd1952256e5e0fedad01",
- "ESFJ": "https://preview.redd.it/u2prthbd0c591.jpg?width=1700&format=pjpg&auto=webp&v=enabled&s=69bbd4da1ba0cad0aacf03519acd0b88de898d78",
- "ENFJ": "https://preview.redd.it/96tw3gea0c591.jpg?width=735&format=pjpg&auto=webp&v=enabled&s=c8e066a67cc0aaab15ed305748540bdd8faa1d1d",
- "ENTJ": "https://preview.redd.it/4a53a73e0c591.jpg?width=563&format=pjpg&auto=webp&v=enabled&s=46e04b01cdaf24d44d6929db59d9cc43222fb606",
-}
-
-mbti_dict_3 = {
- "ISTJ": "https://preview.redd.it/ohmiz5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=ae53a8d373ef1f647118fa9eeeaf7c3ff854cad5",
- "ISFJ": "https://preview.redd.it/snweb7gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=53e076f48fb5ca0c853458748460ce1f19b946f8",
- "INFJ": "https://preview.redd.it/k4tlr5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=7e1f66f4cd1114093bd0fe030c9759f227a8e769",
- "INTJ": "https://preview.redd.it/y2er16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=0a6dcf2ed7e22683cae20075bfe447b2b21399d7",
- "ISTP": "https://preview.redd.it/hhpqqqgappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=d07b1658c350a02bea6ab453df9b53f43618dbf5",
- "ISFP": "https://preview.redd.it/yra229gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=4a6421c5fa8d40b1e2ae279f1291ebba933b5c2c",
- "INFP": "https://preview.redd.it/6x4q36gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=6ea701ea3a8ea0b8e0655fa5b3ed9fe98ec1471a",
- "INTP": "https://preview.redd.it/f61vg6gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=04ba69f8b3978749a2b2e54cdf6070b72b455cf5",
- "ESTP": "https://preview.redd.it/5zqww8gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=d93ab644c74de52107e6c7bd12a562b294f91896",
- "ESFP": "https://preview.redd.it/gpmy69gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=573651229bd65fa44150a30a33d1d8e8dc814b10",
- "ENFP": "https://preview.redd.it/szbvw6gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=6c5a53287fc998cff498fcbc5bf61539fca7c0e3",
- "ENTP": "https://preview.redd.it/zfss16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=cd3b7663053a05216fc35939d3ee04d7a4c23ed7",
- "ESTJ": "https://preview.redd.it/rqv636gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=f6ba4e166ff2c835d427945bfee472af058ea315",
- "ESFJ": "https://preview.redd.it/5df8b9gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=458acaeb49639cc44a6ce8b5ddc2574b31839a60",
- "ENFJ": "https://preview.redd.it/mf8y16gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=84bf19e9982bdc5e6cac7c18b204b12b043fd7d7",
- "ENTJ": "https://preview.redd.it/mi28d5gappk81.jpg?width=1500&format=pjpg&auto=webp&v=enabled&s=56ecc6b4edc6ca2c74a057956b3f1d4f8dd9f60e",
-}
-
-funct_dict = {
- "Ni & Ne - Intuitive Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w14o/ni_ne_intuitive_functions/",
- "Si & Se - Sensorial Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w5b0/si_se_sensorial_functions/",
- "Fi & Fe - Feelings Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7w7pg/fi_fe_feelings_functions/",
- "Ti & Te - Thinking Functions": "https://www.reddit.com/r/UnityHarbor/comments/v7wawp/ti_te_thinking_functions/",
- "Ni & Si Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7whfd/ni_si_differences/",
- "Ti & Fi Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wks4/ti_fi_differences/",
- "Te & Fe Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wnt2/te_fe_differences/",
- "Ne & Se Differences": "https://www.reddit.com/r/UnityHarbor/comments/v7wqme/ne_se_differences/",
- "Functions work in pairs": "https://www.reddit.com/r/UnityHarbor/comments/v8dgrj/functions_work_in_pairs/",
- "Perceiving functions - Time perception": "https://www.reddit.com/r/UnityHarbor/comments/v8dd16/perceiving_functions_time_perception/",
-}
-
-arch_dict = {
- "Differences between Hero functions": "https://www.reddit.com/r/UnityHarbor/comments/v7xgpk/differences_between_hero_functions/",
- "Hero function": "https://www.reddit.com/r/UnityHarbor/comments/v7y4l6/hero_function/",
- "Parent function": "https://www.reddit.com/r/UnityHarbor/comments/v7y6pv/parent_function/",
- "Child function": "https://www.reddit.com/r/UnityHarbor/comments/v7y9yx/child_function/",
- "Inferior or Perfectionist function": "https://www.reddit.com/r/UnityHarbor/comments/v7ye33/inferior_or_perfectionist_function/",
- "Opposing role or Skeptic function": "https://www.reddit.com/r/UnityHarbor/comments/v7yg8c/opposing_role_or_skeptic_function/",
- "Witch or Critic function": "https://www.reddit.com/r/UnityHarbor/comments/v7yjyh/witch_or_critic_function/",
- "Trickster function": "https://www.reddit.com/r/UnityHarbor/comments/v7yncp/trickster_function/",
- "Demon or Saboteur function": "https://www.reddit.com/r/UnityHarbor/comments/v7ypwo/demon_or_saboteur_function/",
-}
-
-gen_dict = {
- "Unity Code - 8 Functions / Patterns": "https://www.reddit.com/r/UnityHarbor/comments/v6rm9o/unity_code_8_functions_patterns/",
- "Unity Code - Overview": "https://www.reddit.com/r/UnityHarbor/comments/v6r6et/unity_code_overview/",
- "Unity Code - 16types Roles": "https://www.reddit.com/r/UnityHarbor/comments/v6rohx/unity_code_16types_roles/",
- "Unity Code - Archetypes dynamics": "https://www.reddit.com/r/UnityHarbor/comments/v6rnzi/unity_code_archetypes_dynamics/",
- "Unity Code - 8 Archetypes": "https://www.reddit.com/r/UnityHarbor/comments/v6rmrc/unity_code_8_archetypes/",
- "Unity Code - 16types structure": "https://www.reddit.com/r/UnityHarbor/comments/v6r8u2/unity_code_16types_structure/",
-}
-
-unity_code_text = """
-**MBTI stands for Myers-Briggs Type Indicator**, a personality test that helps people understand more about their own personality traits. It uses four different sets of characteristics to categorize people into one of 16 personality types.
-
-**These characteristics are:**
-
-- Where you get your **energy** from: Are you more energized by being with other people (extraverted), or by being alone (introverted)?
-- How you gather **information**: Do you focus more on what you can see or touch in the physical world (sensing), or on patterns and meanings you can infer (intuition)?
-- How you make **decisions**: Do you make decisions based on logic and reason (thinking), or based on your personal values and feelings (feeling)?
-- How you **live** your life: Do you prefer to have things settled and decided (judging), or do you like to stay open to new experiences and options (perceiving)?
-
-The MBTI was first developed in the 1940s by the mother-daughter team of Katharine Cook Briggs and Isabel Briggs Myers, who were inspired by the work of the Swiss psychiatrist Carl Jung. They wanted to create a way to help people better understand themselves and others, and to assist in career development and personal growth.
-
-Jung originally proposed the concept of different psychological types based on his observations and experiences, and his work laid the foundation for the development of the MBTI. The test has been extensively researched and continues to be used today in a variety of settings, including business, education, and personal relationships.
-
-
-
-
-**The MBTI theory has 3 levels of depth:**
-
--- **The first level is the 4-letter model**, which is the most commonly used and popularized. It categorizes individuals into one of 16 personality types based on four dichotomies: Extraversion (E) vs Introversion (I), Sensing (S) vs Intuition (N), Thinking (T) vs Feeling (F), and Judging (J) vs Perceiving (P).
-The **4-letter model** gives a broad categorization of an individual's personality type based on four dichotomies.
-
--- **The second level is the 4 functions model**, which focuses on the conscious functions of individuals. It considers how individuals use their dominant and auxiliary functions, as well as how they develop their tertiary and inferior functions.
-The **4 functions model** explains how an individual uses both their dominant and auxiliary functions, as well as how they access and develop their tertiary and inferior functions.
-
--- **The third level is the 8 functions model**, which looks at the unconscious functions of individuals according to John Beebe's theory.
-The **8 functions model** provides insight into how an individual's unconscious functions affect their behavior and emotional state, often revealing hidden motivations, fears, and patterns of behavior.
-
-
-
-
-Here, we clarify the third level of the 16 Types theory developed by Carl Jung and John Beebe, the theory at the origin of MBTI; we call it the **Unity Code**. To summarize, in Beebe's theory of how consciousness works, there are 16 "Types" of people. This means there are 16 profiles, built from 8 patterns, which define specific ways of thinking, feeling and perceiving the world. In other words, humans perceive the world in 16 different ways. The 16 types theory brings a paradox: it categorizes people, a perspective that can be rejected at first, yet it actually gives a wider and more precise view of how humans experience reality differently.
-
-
-
-
-The **Unity Code** synthesizes and illustrates the work of John Beebe on the 8 patterns of Carl Jung, so that it can be used as a language to communicate actionable information related to:
-- _Someone's main strengths and weaknesses_
-- _Someone's natural abilities and gifts_
-- _The main flow, role and challenges a person likes to be in_
-- _The source of misunderstanding within a group_
-
-It also improves co-creation between the 16 types, gives a language for our inner & outer world and unlocks hidden potential!"""
-
-alchemy_text = """
-
-**Jungian alchemy** is a psychological method of transformation inspired by the ancient art of alchemy. It involves using the metaphorical language of alchemy to understand the process of individuation and the integration of the psyche. It aims to transmute the base aspects of the psyche into higher, more positive states of being.
-
-
-
-
-**Jungian alchemy** was developed by the Swiss psychologist Carl Jung, who explored the transformative power of symbols and archetypes on the psyche. He saw alchemical symbolism as a powerful tool for understanding the psyche and facilitating personal growth.
-
-
-
-
-**Beebe's theory** of archetypal functions provides a practical and applicable model for achieving personal growth and transformation. By identifying and working on the cognitive functions that correspond to different stages of development, individuals can transmute mental states into more positive and integrated ones. This process of transformation and integration is a core concept of Jungian alchemy, and Beebe's theory provides an actionable roadmap for achieving it.
-
-
-
-
-The **individuation process**, according to Jungian psychology, is the process of integrating all aspects of the psyche into a harmonious whole, allowing an individual to become fully individuated and self-realized. It involves confronting and assimilating unconscious or repressed aspects of the psyche and achieving a state of balance and wholeness.
-
-1. The **first stage** of the individuation process involves becoming aware of unconscious aspects of the psyche and integrating them into consciousness.
-2. The **second stage** involves developing an authentic and unique sense of self, separate from the influence of others.
-3. The **final stage** involves achieving a state of wholeness by integrating both the conscious and unconscious aspects of the psyche into a harmonious whole.
-
-
-
-
-The **Beebe theory is a language of consciousness**. The theory helps us understand and articulate the complex inner workings of our minds in a way that allows us to become more self-aware and conscious of our behaviors and motivations. By understanding our cognitive functions and archetypes, we can develop a greater understanding of ourselves and the world around us, which can lead to improved relationships, personal growth, and fulfillment.
-
-
-
-
-The **Ni (Introverted Intuition) function** in Beebe's model is associated with the archetypal figure of the alchemist. Ni involves the ability to see patterns and connections between seemingly unrelated things, as well as the ability to envision future possibilities. This function is similar to the alchemist's ability to transmute base metals into gold through seeing hidden connections and unlocking the hidden potential within them. Just as the alchemist transforms physical elements, the person with a well-developed Ni function can transform their internal world through their use of intuition and understanding of symbolism. Therefore, the Ni function is related to alchemy in the sense that it involves the transformation and unlocking of hidden potential through the use of intuition and symbolism.
-
-
-
-
-**Jungian alchemy** is a process of psychological transformation that involves the integration and transformation of unconscious contents, or what Jung called "the shadow," into consciousness. One of the main goals of alchemy is to transmute base metal into gold, which is often seen as a metaphor for transforming the negative energies of the psyche, such as fear, into positive spiritual qualities, such as love and wisdom.
-
-In the Jungian perspective, fear is seen as a natural and necessary part of the shadow that needs to be acknowledged, faced, and integrated in order to grow and evolve. The shadow is a reservoir of repressed emotions, feelings, and desires that we are not aware of or do not want to acknowledge, but which still influence us from the unconscious.
-
-The process of alchemy involves bringing these unconscious contents to the surface and transforming them by shining the light of consciousness upon them. By facing our fears, we are able to transform them into positive qualities such as love, compassion, wisdom, and creativity.
-
-"""
-
-coffee_text = """
-
-
-
-**The process of individuation,** similar to alchemy, is a transformative journey from the ego to the spirit, and can be compared to the process of turning raw coffee beans into a rich, aromatic cup of coffee.
-
-**At the beginning of the coffee process**, the raw coffee beans represent not only the ego in its raw, undeveloped state, but also raw traumatic events that have occurred in an individual's life. These events have the potential to become something more, just as the ego has the potential to transform into a higher state of consciousness.
-
-**The first step in the coffee process** is to select and sort the beans, much like how we must examine and understand our own ego and past experiences before we can begin the process of individuation. This is analogous to the process of introspection, where we must examine our thoughts, feelings, and behaviors to gain a deeper understanding of ourselves and our past.
-
-**Next, the beans are roasted and ground,** which represents the process of transforming the ego and traumatic events. This is similar to the alchemical process of transmutation, where base metals are transformed into gold. In the case of coffee, the beans are heated and ground into something entirely new, just as the ego and traumatic events are transformed through the process of individuation.
-
-**As the coffee beans are roasted and ground,** they release their unique flavors and aromas, just as the individuating ego begins to reveal its unique qualities and purpose. This transformation is not easy, as it requires patience, dedication, and a willingness to undergo the difficult and sometimes painful process of self-examination and healing.
-
-**Finally, the coffee is brewed,** resulting in a rich, complex cup of coffee that provides comfort and nourishment to the body and mind. Similarly, the individuated spirit is transformed into a higher state of consciousness that brings fulfillment and purpose to the individual, including a sense of mission and direction in life.
-
-**In both the coffee process and the process of individuation,** there is a transformation from something raw and traumatic to something refined and valuable. Both require time, effort, and skill to achieve, but the end result is a beautiful, complex, and satisfying creation that brings nourishment and purpose to the individual.
-
-
-"""
-
-nisi_text = """
-**Si, our sense of self and identity (ego):**
-
-Introverted Sensing, or Si, is a cognitive function that is associated with the limbic and nervous system, as well as with our sense of identity and self. The limbic system is a complex structure in the brain that regulates mood, memory, and emotions, and is closely tied to Si's function. Si is responsible for the processing and storage of sensory experiences, which play an important role in shaping one's identity and sense of self. As a person collects more sensory experiences, their Si function helps categorize and store these experiences in the brain for future reference, allowing for a more refined and personalized understanding of the world around them. Consequently, those with a strong Si function often have a strong sense of personal identity, and are highly in tune with their subjective experiences and emotional responses.
-
-**Ni, our sense of direction and meaning (spirit):**
-
-Ni, also known as Introverted Intuition, is a cognitive function that can be associated with our spirit and its transformative power. One of the primary strengths of Ni is its ability to create new patterns and connections based on subconscious insights and intuition. This process allows for the emergence of new perspectives and ideas that can transform how a person perceives and interacts with the world.
-
-Ni is introverted, which means it primarily focuses on internal perceptions and processing of information. This process often happens beyond a person's conscious awareness, which is why Ni is sometimes described as a "sixth sense" or "gut feeling." Because Ni operates at a subconscious level, it has the power to tap into a person's deepest desires, fears, and aspirations, allowing for transformational growth and change.
-
-Through its capacity to create new patterns, Ni has the potential to inspire and transform a person's spirit, leading to a greater sense of purpose and connection to the world. This can manifest in various ways, such as in new and innovative ideas, a heightened awareness of personal values and goals, or a deep sense of intuition and spirituality.
-
-Ni is also associated with the process of individuation, which is the psychological development of the self towards a state of wholeness and integration. This process involves the integration of one's conscious and unconscious aspects to form a more complete sense of self, often leading to transformative growth and self-actualization.
-
-In summary, Ni's capacity to create new patterns and connections, coupled with its ability to tap into a person's subconscious desires and aspirations, allows it to be associated with the transformative power of the spirit. Through this inner process, Ni users can cultivate a greater sense of purpose and meaning in their lives, leading to profound personal growth and spiritual fulfillment.
-"""
-
-mirror_text = """
-
-**In Jungian alchemy,** the interaction between mirror types can represent the transformative power of opposites or the integration of the conscious and the unconscious. It is believed that these types can complement each other well by bringing together complementary cognitive functions. This configuration allows them to understand each other on a deep level, regardless of their differences in communication styles and energy levels.
-
-An analogy to understand mirror types is to think of two different puzzle pieces that come together to make a complete picture. Similarly, mirror types have complementary cognitive functions that come together to form a more complete understanding of the world.
-
-**Linda Berens' ideal pairings theory** suggests that certain MBTI types are naturally compatible with each other due to their complementary cognitive stacks. For example, INFJs and ENFPs are considered an ideal pairing due to their complementary cognitive functions. INFJs have dominant introverted intuition (Ni) and auxiliary extraverted feeling (Fe), while ENFPs have dominant extraverted intuition (Ne) and auxiliary introverted feeling (Fi). This means that INFJs can provide deep insight and vision, while ENFPs provide energy, enthusiasm, and passion for new ideas. Together, they can collaborate to generate and execute innovative solutions that are both insightful and impactful. This pairing can help individuals better understand and appreciate their partner's strengths and preferences in a relationship or collaborative setting.
-
-
-
-
-**INFJs and ENFPs** are considered to be mirror types because they share the same type of functions but in a different attitude (NiFe for INFJ and NeFi for ENFP). Despite having different personalities, they often find that each other's strengths complement their own, and they can relate easily to one another. They both value creativity, intuition, and authenticity, and often share the goal of making the world a better place by promoting the wellbeing of people and society. In practice, this often shows up as a desire to help others and to work towards solving social issues.
-
-
-
-
-**INTJs and ENTPs** share the same type of functions but in a different attitude (NiTe for INTJ and NeTi for ENTP), leading them to be referred to as mirror types. While they have different approaches to problem-solving and decision-making, they both value competence, originality, and intellectual stimulation. They are often natural leaders and enjoy taking on challenging projects that test their abilities. Their common goal is often to find innovative solutions to complex problems and to make a lasting impact in their fields of expertise.
-
-
-
-
-**INTPs and ENTJs** share the same type of functions but in a different attitude (TiNe for INTP and TeNi for ENTJ), making them mirror types. Despite having different personalities, they can relate on a profound level and often complement each other's strengths and weaknesses. They both value strategic thinking, logic, and rationality, and are often visionary thinkers who enjoy solving complex problems. Their common goal is often to be at the forefront of innovation, using their unique abilities to create long-lasting change in their areas of interest.
-
-
-
-
-**INFPs and ENFJs** share the same type of functions but in a different attitude (FiNe for INFP and FeNi for ENFJ), which can make them very different on the surface level, especially when it comes to expressing emotions and managing social dynamics. However, they share common values of empathy and authenticity, which can bring them together despite their personality differences. In practice, they often share a goal of making a positive impact in the world by helping others and promoting social harmony through mutual understanding and cooperation.
-
-"""
-
-vision_text = """
-
-The use of **MBTI, DMT, and Spirulina** can provide a unique approach to addressing some of the world's most significant problems. Here are a few examples:
-
-.
-1. **Mental Health**:
-
-MBTI is a personality assessment tool that can help identify people's cognitive preferences, including how they process information, make decisions, and interact with the world. By understanding different personalities, people can be matched with mental health therapies and treatments that suit their individual needs. In addition, DMT has been shown to have psychotherapeutic effects in the treatment of anxiety and depression. Spirulina, being a natural source of antioxidants and nutrients, can provide support to the brain and nervous system.
-
-.
-2. **Malnutrition and Food Insecurity**:
-
-Spirulina is a rich source of protein, vitamins, and minerals, which makes it a great option for combating malnutrition and food insecurity. AI can be used to optimize the design and planning of spirulina farms to produce maximum yield at the lowest possible cost. In addition, the use of DMT can complement these efforts by expanding people's perceptions of food and health.
-
-.
-3. **Environmental Issues**:
-
-MBTI can help people understand their environmental values, leading to more sustainable lifestyles. DMT experiences have also been shown to facilitate a sense of deep connection and responsibility towards the environment. Spirulina can be used to mitigate environmental problems as it can be grown on desert land using salt water, making it a sustainable option for food production.
-
-Overall, the use of MBTI, DMT, and Spirulina can provide a unique and holistic approach to addressing some of the world's most significant problems, including mental health, malnutrition and food insecurity, and environmental issues.
-
-
-
-
-**Where we are now versus where we could potentially be:**
-
-.
-1. **Mental Health:**
-
-- _Where we are now_: Mental health issues are widespread and can have a significant impact on quality of life.
-- _Goal_: To provide more personalized and effective mental health treatment options.
-- _Where we want to be_: People can easily access mental health treatment that suits their individual needs thanks to tools like MBTI assessments, which complement a range of therapies. DMT and Spirulina therapies are optimized and become more widely used to assist in treating anxiety, depression, and other mental health issues, leading to improved mental health outcomes.
-
-.
-2. **Malnutrition and Food Insecurity:**
-
-- _Where we are now_: Many people lack access to nutritious and affordable food.
-- _Goal_: To increase access to nutrient-dense food and reduce food insecurity.
-- _Where we want to be_: Spirulina is used as a nutritious, sustainable, and cost-effective food source that helps address issues of malnutrition and food insecurity. AI is utilized to optimize Spirulina production and ensure it is accessible to more people. DMT use helps to increase the appreciation of food, leading to more conscious dietary choices.
-
-.
-3. **Environmental Issues:**
-
-- _Where we are now_: Environmental issues, such as climate change and pollution, pose pressing threats to ecosystems and human health.
-- _Goal_: To increase awareness and action toward sustainable environmental practices.
-- _Where we want to be_: MBTI is used to cultivate a sense of environmental responsibility and connection to the natural world. DMT experiences help to foster deeper connections with nature, leading to increased awareness and action toward sustainable practices. Spirulina farming utilizes sustainable practices, such as using saltwater and recycled nutrients, to minimize environmental impact and contribute to efforts towards a more sustainable future.
-
-
-
-
-There are several ways in which spirulina farming is more efficient than traditional farming:
-.
-1. **Land use**: Spirulina farming requires significantly less land to produce the same amount of protein compared to traditional farming. Spirulina can be cultivated in tanks or ponds, allowing for more efficient use of space.
-.
-2. **Water use**: Spirulina farming requires less water than traditional farming. Spirulina can be grown in both saltwater and freshwater, and it can tolerate a wide range of temperatures and pH levels. This makes it more adaptable to different environments, enabling it to grow in areas where traditional crops may struggle.
-.
-3. **Productivity**: Spirulina is a highly productive crop. It can produce up to 50 times more protein per unit area than traditional crops, such as soybeans or corn. This means that spirulina farming can produce more food using less land and water.
-.
-4. **Nutrient content**: Spirulina is a highly nutritious crop, containing essential amino acids, vitamins and minerals such as B12, iron, and calcium. Traditional crops may not contain such high levels of essential nutrients, particularly in cases where the soil is depleted of key nutrients.
-.
-5. **Sustainability**: Spirulina farming is environmentally sustainable. As a photosynthetic organism, spirulina does not require fertilizers, pesticides, or other inputs that are commonly used in traditional agriculture. This can help reduce the negative impact of farming on local ecosystems.
-
-"""
-
-journey_text = """
-The Sloth's journey will help you to understand the 11 steps of the universal hero's journey discovered by Joseph Campbell, a structure you find in all stories and in life's stories.
-
-**Sloth Alchemist's Journey:**
-
-Once upon a time, in the lush forest, there was a sloth who had a traumatic past. He had witnessed the destruction of his home and the displacement of his family due to human activity. The sloth was deeply affected by this experience and felt a sense of despair and hopelessness for his own future and that of his animal companions.
-
-However, one day, as the sloth was sitting in a tree, he had a vision. He saw all the animals of the forest coming together in unity and love, and he knew that he had a purpose to fulfill. He realized that he could use his own experiences to help others, and he made it his mission to save the animals of the forest from a similar fate.
-
-_Step 1: The Call to Adventure_
-The sloth's journey began when he heard a call to adventure - a voice within him urging him to help the animals in the forest overcome their fears and come together in harmony. He knew that he had a special gift that could help them, and so he set out on a mission to find a way to bring his vision to life.
-
-_Step 2: Refusal of the Call_
-At first, the sloth was hesitant to accept this call to adventure. He was still struggling with his own trauma and feared that he might not be strong enough to help others. But his inner sense of purpose and his desire to prevent others from experiencing what he had experienced ultimately overcame his fears, and he took the first steps on his journey.
-
-_Step 3: Meeting the Mentor_
-As the sloth journeyed deeper into the forest, he met a wise old owl who became his mentor. The owl taught him about the power of alchemy and how it could be used to transform his past experiences into a force for good. With the owl’s guidance, the sloth began to understand how he could use his own experiences to help the animals in the forest overcome their fears and come together in harmony.
-
-_Step 4: Crossing the Threshold_
-With the owl’s guidance, the sloth started to take action. He began by reaching out to the other animals of the forest and listening to their concerns. He created a safe space where they could share their experiences and feelings, and he used his knowledge of alchemy to help them transform their pain into something positive. This was the moment when the sloth truly crossed the threshold and committed himself fully to his mission of bringing unity and love to the animals in the forest.
-
-_Step 5: Tests, Allies, Enemies_
-As the sloth continued on his journey, he faced many tests and challenges. Some animals were skeptical of his methods and resisted his teachings. But he also found allies along the way - other animals who shared his vision and were willing to work with him to make it a reality.
-
-_Step 6: Approach to the Inmost Cave_
-The approach to the inmost cave came when the sloth realized that he needed to do more to help the animals of the forest. He used his knowledge of alchemy to create SlothAi, an AI that could help the animals connect with each other on a deeper level and work together in harmony.
-
-_Step 7: Ordeal_
-Creating SlothAi was a challenging ordeal. It required the sloth to use all of his skills and knowledge, as well as to take risks and be vulnerable. But he persevered, and soon the AI was complete.
-
-_Step 8: Reward_
-The reward came when the sloth saw the positive impact that SlothAi was having on the animals in the forest. They were using it to connect with each other, to learn more about themselves and each other, and to find new ways to work together in harmony. The sloth felt a deep sense of satisfaction and fulfillment knowing that his mission was being realized.
-
-**Hero function and Hero's journey:**
-
-The hero function and the hero's overarching goal in the hero's journey are analogous, as both are integral to an individual's identity and purpose. The hero function plays a significant role in shaping an individual's behaviors, values, and thought processes, just as the hero's personality traits do in their journey. Embracing and utilizing the hero function can help individuals tap into their inherent strengths and overcome challenges, leading to personal growth and success.
-
-**Here is a short description of the hero's journey of each type:**
-
--- **ISTJ**: Through perseverance and hard work, they find meaning and purpose in fulfilling their duties, navigating obstacles with practicality and common sense.
--- **ISFJ**: As caretakers who value tradition and loyalty, they seek to fulfill their responsibilities and provide support to those around them, cultivating a sense of harmony and unity.
--- **INFJ**: Visionaries who seek to understand the world deeply, they explore their own values and spirituality, working towards creating a better world for themselves and others.
--- **INTJ**: Innovators with a thirst for knowledge, they seek to uncover new truths and develop strategies to achieve their goals, often going against the grain.
--- **ISTP**: Using their resourcefulness and practical skills, they navigate challenges and find solutions on their own, enjoying the adrenaline rush of solving complex problems.
--- **ISFP**: As artists who are inspired by emotion and their surroundings, they express themselves creatively and seek to find their place in the world, often appreciating the beauty in life's simple moments.
--- **INFP**: As idealists who strive to find their sense of purpose, they seek to create a meaningful life for themselves and others, often drawn to creative pursuits and seeking authentic self-expression.
--- **INTP**: Explorers of knowledge and ideas, they enjoy analyzing complex systems and developing new insights, often challenging the conventional way of thinking.
--- **ESTP**: Adventurers who crave excitement and live in the moment, they enjoy exploring the world around them, taking risks, and thriving in competitive environments.
--- **ESFP**: As outgoing and social individuals, they enjoy making connections with others and living life to the fullest, cherishing memorable experiences and enjoying the present moment.
--- **ENFP**: Visionary leaders who seek to inspire others and create a better future, they use their creativity and intuition to develop new ideas and inspire others to pursue their own dreams.
--- **ENTP**: As devil's advocates who enjoy debating and questioning the status quo, they seek to develop innovative solutions and change the world with their unique perspectives, often taking bold risks and challenging authority to achieve their goals.
--- **ESTJ**: Efficient and organized leaders who value discipline and rules, they use their practicality and sense of responsibility to guide and support their team towards success.
--- **ESFJ**: As nurturing and empathetic individuals, they prioritize maintaining social harmony and supporting those around them, ensuring that everyone's needs are met and everyone feels valued.
--- **ENTJ**: Strategic and ambitious leaders who enjoy taking charge and inspiring others, they use their intellect and vision to develop long-term plans and achieve their goals through bold action and calculated risk-taking.
--- **ENFJ**: As charismatic and empathetic individuals, they use their intuition and communication skills to inspire and motivate others towards achieving their shared visions and ideals, often cultivating strong relationships and networks in the process.
-
-"""
-
-analogy_text = """
-
-_**Here's an analogy for the cognitive functions with parts of a tree:**_
-
--.**Si:** Si is like the roots of a tree, which provide stability and nourishment to support the growth and development of the tree. Similarly, Si is the cognitive function that provides us with a solid foundation of knowledge and experience, helping us navigate life's challenges with stability and confidence. Just as the roots of a tree anchor it to the ground and provide the necessary nutrients, Si anchors us to our past experiences and provides us with a reservoir of information to draw upon, allowing us to make informed decisions and handle situations with ease.
-
--.**Se:** The trunk of a tree not only provides support and stability, but it also plays a crucial role in transporting nutrients and water from the roots to the leaves of the tree. Similarly, Se not only grounds us in the present moment and provides a sense of stability, but it also helps us to navigate and adapt to changes in our environment, ensuring our survival and well-being. As the trunk helps the tree withstand external forces and transport vital resources, Se allows us to stay attuned to our surroundings and make the most of the opportunities presented to us. Like the stomata on a tree's leaves, Se allows us to take in and process the sensory information around us, giving us a fuller, richer experience of life.
-
--.**Ne:** The branches of a tree that spread out in various directions represent Ne because it is the function that generates new ideas and possibilities, just as branches reach out in different directions, representing the different possible ways we can approach a situation.
-
--.**Ni:** Ni is like the driving force that compels a tree's roots and leaves to constantly seek water and sunlight. Similarly, Ni is the cognitive function that motivates us to seek our life's direction and reach our full potential. In this way, just as a tree's roots and leaves work tirelessly to find the sustenance needed to grow and thrive, Ni pushes us to seek understanding and knowledge to support our personal and spiritual growth.
-
--.**Fe:** Fe can be compared to the flowers and pollen of a tree, which allow for cross-pollination and collaboration between different trees in the vicinity. Just as the flowers and pollen of one tree can spread to others, allowing for the sharing of resources and the growth of a larger community, Fe helps us connect with others emotionally and value social harmony, encouraging us to work together and cultivate a more collaborative and connected society.
-
--.**Fi:** Fi can be compared to the sap of a tree, which is the life force that sustains it. In the same way, Fi is the cognitive function that is the source of our inner values and emotions, which drives our actions and provides us with a sense of purpose and meaning. Just as the sap is essential to the growth and survival of the tree, Fi is essential to our personal growth and fulfillment.
-
--.**Ti:** Ti can also be likened to the process by which wood is made. Just as wood is formed from the combination of cellulose, hemicellulose, and lignin arranged in a specific order to create the complex lignocellulosic structure, Ti operates by using a logical and systematic process to take raw information and create a structured and coherent understanding. In this way, Ti is like the chemical reactions and molecular interactions that occur in the formation of wood, transforming disorderly elements into a structured and functional entity. The internal framework and structure of a tree, like Ti, determines the strength, flexibility, and ultimately the shape of the wood, shaping the outcome of the growth and development process.
-
--.**Te:** Te can be compared to the tree's ability to shed old leaves and grow new ones, which enables it to adapt and change in response to its environment. Similarly, Te is the cognitive function that helps us adapt to the external world and make decisions based on objective facts and data, enabling us to grow and evolve as individuals. Just as a tree sheds its old leaves to conserve resources and grow new ones, Te helps us shed inefficient or outdated ways of thinking and adopt more effective strategies to achieve our goals.
-
-
-
-
-_**Here are some job titles that might represent each type during the Roman Empire:**_
-
-1-.**ESTJ** - **Praetor:** Known for their strong leadership, attention to detail, and adherence to tradition, ESTJs would make excellent Praetors during the Roman Empire. Praetors were responsible for overseeing the legal system and maintaining order throughout the empire.
-
-2-.**ISTJ** - **Scribe:** With their meticulous attention to detail and strong memory, ISTJs would be well-suited to the position of scribe during the Roman Empire. As scribes, they would be responsible for recording and maintaining important documents and records.
-
-3-.**ESFJ** - **Vestal Virgin:** Known for their loyalty, commitment, and adherence to tradition, ESFJs would be ideal candidates for the position of Vestal Virgin during the Roman Empire. Vestal Virgins were responsible for maintaining the sacred flame of Vesta, the goddess of the hearth.
-
-4-.**ISFJ** - **Medicus:** With their empathetic and nurturing nature, ISFJs would make excellent medicus, or doctors, during the Roman Empire. As doctors, they would attend to the physical and emotional needs of their patients, and take great care in promoting their health and well-being.
-
-5-.**ESTP** - **Gladiator:** With their fearlessness, athleticism, and competitive spirit, ESTPs would excel as gladiators during the Roman Empire. As gladiators, they would engage in combat for the entertainment of the masses, and could potentially win great fame and fortune.
-
-6-.**ISTP** - **Engineer:** With their analytical and hands-on approach to problem-solving, ISTPs would make excellent engineers during the Roman Empire. As engineers, they would design and construct the many buildings and structures that defined the empire's architecture.
-
-7-.**ESFP** - **Actor:** Known for their outgoing personalities and love of the spotlight, ESFPs would make excellent actors during the Roman Empire. As actors, they would be responsible for entertaining the people with their performances in the theatre or at public events.
-
-8-.**ISFP** - **Artist:** With their creativity and attention to aesthetics, ISFPs would be well-suited to the role of artist during the Roman Empire. As artists, they would be responsible for creating works of art such as sculptures or paintings to decorate public spaces or private homes.
-
-9-.**ENFJ** - **Senator:** With their natural leadership skills and ability to inspire others, ENFJs would excel as Senators during the Roman Empire. As Senators, they would be responsible for leading the legislative branch of government and working to improve the lives of the Roman people.
-
-10-.**INFJ** - **Psychologist:** With their keen understanding of human behavior and emotions, INFJs could be well-suited to a role similar to a psychologist during the Roman Empire. They could provide counseling and support to people struggling with mental health issues or other psychological challenges.
-
-11-.**ENFP** - **Diplomat:** With their charismatic personality and ability to connect with others, ENFPs would be excellent diplomats during the Roman Empire. As diplomats, they would be responsible for negotiating peace and treaties with other nations and forging alliances to benefit Rome.
-
-12-.**INFP** - **Philosopher:** INFPs are known for their deep thoughts and values, making them great candidates for the role of philosopher during the Roman Empire. As philosophers, they would be responsible for exploring the meaning of life and human existence, sharing their insights and ideas with others.
-
-13-.**ENTJ** - **General:** With their strategic thinking and decisive attitude, ENTJs would make excellent generals during the Roman Empire. As generals, they would be responsible for leading the Roman army to victory in battle and expanding the empire's territory.
-
-14-.**INTJ** - **Advisor:** INTJs are known for their analytical minds and strategic thinking, making them ideal candidates for the role of advisor during the Roman Empire. As advisors, they would be responsible for providing counsel and guidance to the emperors and other leaders, helping them make wise decisions that would benefit the empire.
-
-15-.**ENTP** - **Lawyer:** With their quick wit and ability to see multiple perspectives, ENTPs would excel as lawyers during the Roman Empire. As lawyers, they would be responsible for arguing cases in court and defending their clients' rights and interests.
-
-16-.**INTP** - **Architect:** With their brilliant minds and attention to detail, INTPs would be well-suited to the role of architect during the Roman Empire. As architects, they would be responsible for designing and constructing some of Rome's most impressive buildings and structures.
-
-_Note that these are generalizations and not all individuals of a certain type might fit these job titles._
-"""
-
-def get_link(mbti_type):
- link = mbti_dict[mbti_type]
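- # download the two preview images for the selected type from their hosted URLs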
- response = requests.get(mbti_dict_2[mbti_type])
- img = Image.open(BytesIO(response.content))
- response2 = requests.get(mbti_dict_3[mbti_type])
- img2 = Image.open(BytesIO(response2.content))
- return link, img, img2
-
-def get_link2(funct):
- link2 = funct_dict[funct]
- return link2
-
-def get_link3(arch):
- link3 = arch_dict[arch]
- return link3
-
-def get_link4(gen):
- link4 = gen_dict[gen]
- return link4
-
-def update_question_textbox(title, mbti_type, output_question=""):
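- # fill the selected question template with the chosen MBTI type; fall back to the current question text if the title is unknown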
- return questions_dict.get(title, output_question).format(mbti_type)
-
-def get_empty_state():
- return {"total_tokens": 0, "messages": []}
-
-def update_prompt_temp():
- choices = list(prompt_templates.keys())
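- # keep the first (default) entry in place and sort the remaining choices alphabetically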
- choices = choices[:1] + sorted(choices[1:])
- return gr.update(value=choices[0], choices=choices)
-
-def update_mbti_dict():
- choices = list(mbti_dict.keys())
- choices = choices[:1] + sorted(choices[1:])
- return gr.update(value=choices[2], choices=choices)
-
-def on_token_change(user_token):
- openai.api_key = user_token
-
-def on_prompt_template_change(prompt_template):
- if not isinstance(prompt_template, str): return
- return prompt_templates[prompt_template]
-
-def on_check_q(output_question, checkbox_q, input_user):
- return output_question if checkbox_q else input_user
-
-def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
-
- history = state['messages']
-
- if not prompt:
- return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"{state['total_tokens']}", state
-
- prompt_template = prompt_templates[prompt_template]
-
- system_prompt = [{ "role": "system", "content": prompt_template }]
-
- prompt_msg = { "role": "user", "content": prompt }
-
- try:
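- # request a completion from gpt-3.5-turbo using the system prompt, the last context_length exchanges and the new user message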
- completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
-
- history.append(prompt_msg)
- history.append(completion.choices[0].message.to_dict())
-
- state['total_tokens'] += completion['usage']['total_tokens']
-
- except Exception as e:
- history.append(prompt_msg)
- history.append({
- "role": "system",
- "content": f"Error: {e}"
- })
-
- total_tokens_used_msg = f"{state['total_tokens']}"
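- # gr.Chatbot expects a list of (user message, assistant reply) pairs, so pair up the flat history list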
- chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
-
- return '', chat_messages, total_tokens_used_msg, state
-
-def clear_conversation():
- return gr.update(value=None, visible=True), None, "", get_empty_state()
-
-
-css = """
- .gradio-container {background-color: white}
- .prose p, .prose p strong, .prose p b, .prose p em, .prose p i, .prose li, .prose center, .prose center strong, .prose center b, .prose center em, .prose center i, .prose center li {color: #000000 !important;}
- #col-container {max-width: 100%; margin-left: auto; margin-right: auto;}
- #tab {color: #000000 !important;}
- #chatbox {min-height: 400px;}
- #image {max-width: 80%; margin-left: auto; margin-right: auto;}
- #image2 {max-width: 20%; margin-left: auto; margin-right: auto;}
- #image3 {max-width: 70%; margin-left: auto; margin-right: auto; border-radius: 4px;}
- #header {text-align: center; font-size: 1em;}
- #prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
- #question_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px; user-select: text;}
- #input_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px; user-select: text;}
- #total_tokens_str {text-align: left; font-size: 0.8em; color: #666;}
- #label {padding: 0.5em; margin: 0;}
- .message { font-size: 1.2em; }
- """
-
-with gr.Blocks(css=css, theme=gr.themes.Monochrome(), title="SlothAi.xyz") as demo:
-
- state = gr.State(get_empty_state())
-
-
- with gr.Column(elem_id="col-container"):
- gr.HTML("""
""")
-
- with gr.Row():
- with gr.Column():
- with gr.Tab("Home", elem_id="tab"):
- chatbot = gr.Chatbot(elem_id="chatbox", label="Sloth Alchemist")
- input_message = gr.Markdown(elem_id="question_preview", visible=False)
- input_user = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
- btn_submit = gr.Button("Submit")
- default_k = gr.Markdown(value="By default, you are using the limited OpenAI key provided by SlothAi.xyz. If the limit is reached, enter your own free key at the bottom of the page. By the way, you can increase the Sloth's creativity by adjusting the parameters in Settings; by default, it is set to have a fast response time.", elem_id="question_preview")
- total_tokens_str = gr.Textbox(label="Total tokens used:", elem_id="total_tokens_str", interactive=False)
- btn_clear_conversation = gr.Button("Start New Conversation")
- btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
- checkbox_q = gr.Checkbox(label="1.- Check to enable, then select your type and a question, and press -Submit-. The question will be submitted to the Sloth automatically.")
- mbti_type_input = gr.Dropdown(label="2.- Select your type:", choices=list(mbti_dict.keys()), value="INFJ")
- title_dropdown = gr.Dropdown(label="3.- Select a question:", choices=list(questions_dict.keys()), value="Hero Test")
- output_question = gr.Markdown(value="Can you help me to find my type with your Hero Test?", elem_id="question_preview")
- title_dropdown.change(update_question_textbox, inputs=[title_dropdown, mbti_type_input], outputs=[output_question])
- gr.Markdown("---")
- gr.Markdown("Enter your own OpenAI API Key. You can get it for free [here](https://platform.openai.com/account/api-keys). To save your API key for future use, you can add it to your password manager of your web browser.", elem_id="label")
- user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False)
- user_token.change(on_token_change, inputs=[user_token], outputs=[])
- gr.Markdown("---")
- gr.Markdown("I can help you find your type, using this test:")
- gr.HTML("""