diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/poe_test.py b/spaces/101-5/gpt4free/g4f/.v1/testing/poe_test.py deleted file mode 100644 index 6edc030c3fc6d85c2cb8a27e8637391fbeac8c3f..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/testing/poe_test.py +++ /dev/null @@ -1,13 +0,0 @@ -from time import sleep - -from gpt4free import quora - -token = quora.Account.create(proxy=None, logging=True) -print('token', token) - -sleep(2) - -for response in quora.StreamingCompletion.create(model='ChatGPT', prompt='hello world', token=token): - print(response.text, flush=True) - -quora.Account.delete(token) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/3d Systems Cubify Sculpt 2014 32bit Incl Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/3d Systems Cubify Sculpt 2014 32bit Incl Crack.md deleted file mode 100644 index 92b00e0d8d01889a7ab0e185a0e16fb9187c380e..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/3d Systems Cubify Sculpt 2014 32bit Incl Crack.md +++ /dev/null @@ -1,75 +0,0 @@ - -
-3D Systems Cubify Sculpt 2014 32bit Incl Crack: A Powerful and Easy-to-Use Software for 3D Printing-Have you ever dreamed of creating your own 3D models and printing them out in real life? Do you want to design anything from toys, jewelry, art, figurines, sculptures, prototypes, and more? If you answered yes to these questions, then you need to check out Cubify Sculpt 2014, a powerful and easy-to-use software for 3D printing. Cubify Sculpt 2014 is a product of 3D Systems, a leading company in the 3D printing industry. Cubify Sculpt 2014 allows you to sculpt and manipulate virtual clay with your mouse or touch screen, just like you would with real clay. You can create organic shapes, add textures, colors, and details, and export your models to print them in 3D. Cubify Sculpt 2014 is compatible with Windows 7 and 8, and requires a 32-bit system. In this article, I will show you how to download and install Cubify Sculpt 2014 32bit incl crack, how to use it to create amazing 3D models, how to export and print your models, and some tips and tricks for using it effectively. By the end of this article, you will be able to unleash your creativity and make your own 3D masterpieces with Cubify Sculpt 2014. -3d Systems Cubify Sculpt 2014 32bit Incl CrackDOWNLOAD ››››› https://byltly.com/2uKvFW - How to Download and Install Cubify Sculpt 2014 32bit Incl Crack-The first step to use Cubify Sculpt 2014 is to download and install it on your computer. You can buy the software from the official website of Cubify for $129, or you can download it for free from a reliable source such as this one. If you choose the latter option, you will also get a crack file that will activate the full version of the software. Here are the steps to download and install Cubify Sculpt 2014 32bit incl crack: -
Congratulations! You have successfully downloaded and installed Cubify Sculpt 2014 32bit incl crack. Now you are ready to use it to create amazing 3D models. -How to Use Cubify Sculpt 2014 to Create Amazing 3D Models-Cubify Sculpt 2014 is a software that lets you sculpt and manipulate virtual clay with your mouse or touch screen, just like you would with real clay. You can start with a box, sphere or cylinder of virtual clay, and use various tools to push, pull, smooth, emboss, deform, reform, paint, and more. You can also design with symmetry when modeling a face or figurine, or deform and reform your model by squishing and pulling whole objects. You can add patterns and textures from Cubify Sculpt's library or import your own displacement map. You can also add color with the paintbrush feature. Here are the steps to use Cubify Sculpt 2014 to create amazing 3D models: -
Congratulations! You have successfully used Cubify Sculpt 2014 to create an amazing 3D model. Now you are ready to export and print it. - -How to Export and Print Your 3D Models with Cubify Sculpt 2014-Cubify Sculpt 2014 allows you to export and print your 3D models in various ways. You can save your model as a STL, OBJ, PLY, CLY or ZPC file, and choose your preferred printing method: Cloudprint, Cube printer or third-party printer. Here are the steps to export and print your 3D models with Cubify Sculpt 2014: -
Congratulations! You have successfully exported and printed your 3D model with Cubify Sculpt 2014. Now you can enjoy your 3D masterpiece in real life. -Tips and Tricks for Using Cubify Sculpt 2014 Effectively-Cubify Sculpt 2014 is a powerful and easy-to-use software for 3D printing, but there are some tips and tricks that can help you use it more effectively. Here are some of them: -
These are some of the tips and tricks for using Cubify Sculpt 2014 effectively. You can also explore more features and functions by clicking on the "Help" button on the top right corner of the screen, and choosing from various options such as "Tutorials", "FAQs", "Support", etc. -Conclusion: Why Cubify Sculpt 2014 is a Great Choice for 3D Printing Enthusiasts-In conclusion, Cubify Sculpt 2014 is a great choice for 3D printing enthusiasts who want to create their own 3D models and print them out in real life. Cubify Sculpt 2014 is a powerful and easy-to-use software that lets you sculpt and manipulate virtual clay with your mouse or touch screen, just like you would with real clay. You can create organic shapes, add textures, colors, and details, and export your models to print them in 3D. Cubify Sculpt 2014 is compatible with Windows 7 and 8, and requires a 32-bit system. You can download and install Cubify Sculpt 2014 32bit incl crack for free from a reliable source such as this one. You can also use some tips and tricks to use it more effectively. -If you are interested in creating your own 3D masterpieces with Cubify Sculpt 2014, don't hesitate any longer. Download Cubify Sculpt 2014 today and unleash your creativity! -FAQs-Here are some frequently asked questions about Cubify Sculpt 2014: -
Cubify Sculpt 2014 requires a Windows 7 or 8 operating system with a 32-bit processor. It also requires a minimum of 2 GB RAM, 1 GB free disk space, OpenGL graphics card with at least 256 MB RAM, Internet connection for activation and updates. -Cubify Sculpt 2014 supports the following file formats: STL, OBJ, PLY, CLY and ZPC. You can import and export these file formats to and from Cubify Sculpt 2014. -Cubify Sculpt 2014 offers three printing methods: Cloudprint, Cube printer or third-party printer. You can choose your preferred method by clicking on the "Print" button on the top left corner of the screen. You can also adjust your print settings such as scale, orientation and resolution by using the tools on the left side of the screen. -Cubify Sculpt 2014 has several advantages over other 3D modeling software, such as: -
If you need more help or support for Cubify Sculpt 2014, you can click on the "Help" button on the top right corner of the screen, and choose from various options such as "Tutorials", "FAQs", "Support", etc. You can also visit the official website of Cubify or contact their customer service team. -I hope you enjoyed this article and learned how to use Cubify Sculpt 2014 to create amazing 3D models. If you have any questions or feedback, please leave a comment below. Thank you for reading! b2dd77e56b- - \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/4K Video Downloader Patch The Ultimate Guide to Downloading High-Quality Videos.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/4K Video Downloader Patch The Ultimate Guide to Downloading High-Quality Videos.md deleted file mode 100644 index 90e91ec3416b15fe6d35f0c872c7b46dfd2fe658..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/4K Video Downloader Patch The Ultimate Guide to Downloading High-Quality Videos.md +++ /dev/null @@ -1,25 +0,0 @@ - - How to Use 4K Video Downloader Patch to Download Videos from Any Site-If you are looking for a way to download videos from any site in high quality, you might want to try 4K Video Downloader Patch. This is a software that allows you to download videos from YouTube, Vimeo, Facebook, Instagram and more in 4K resolution. You can also download playlists, channels, subtitles and 3D videos with this software. -4k video downloader patchDownload ::: https://byltly.com/2uKA7c - But how do you use 4K Video Downloader Patch to download videos from any site? Here are the steps you need to follow: -
That's how you use 4K Video Downloader Patch to download videos from any site. This software is easy to use, fast and reliable. It can help you save your favorite videos offline and watch them anytime you want. However, you should always respect the copyright of the video owners and use this software for personal use only. - -Now that you know how to use 4K Video Downloader Patch to download videos from any site, you might be wondering what are the benefits of using this software. Here are some of the reasons why 4K Video Downloader Patch is one of the best video downloader software available: -
These are just some of the benefits of using 4K Video Downloader Patch to download videos from any site. This software is fast, reliable and versatile. It can help you save your favorite videos offline and watch them anytime you want. However, you should always respect the copyright of the video owners and use this software for personal use only. - ddb901b051- - \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch ESI Tronic 2.0 Key Generator What You Need to Know Before You Buy.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch ESI Tronic 2.0 Key Generator What You Need to Know Before You Buy.md deleted file mode 100644 index cbf729bf5b18a8ec96ff74cfa86e2c786109d80b..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch ESI Tronic 2.0 Key Generator What You Need to Know Before You Buy.md +++ /dev/null @@ -1,205 +0,0 @@ - - What is Bosch ESI Tronic 2.0 and why do you need it?-If you are a professional mechanic, a car enthusiast, or a vehicle owner who wants to perform maintenance, service, and repair work on your own, you need a reliable diagnostic software that can help you with various tasks. One of the best options available in the market is Bosch ESI Tronic 2.0, a comprehensive diagnostic software that covers a wide range of vehicles worldwide. -Bosch ESI Tronic 2.0 is an online diagnostic software that enables workshops to carry out diagnosis, troubleshooting, repair, maintenance, service, wiring diagrams, schematics, and more quickly, efficiently, and effectively. The diagnostic software is compatible with Bosch KTS diagnostic tools, such as KTS 560, 590, 350, or 250. It also works with other standard OBD-II scanners. -bosch esi tronic 2.0 key generatorDownload Zip →→→ https://byltly.com/2uKwLU - Bosch ESI Tronic 2.0 has many features that make it stand out from other diagnostic software. Some of these features are: -
With Bosch ESI Tronic 2.0, you can perform various tasks on your vehicle with ease and accuracy. Some of these tasks are: -bosch esi tronic 2.0 activation code generator
How to install and activate Bosch ESI Tronic 2.0?-To use Bosch ESI Tronic 2.0, you need to install it on your computer or laptop first. You also need to activate it with a valid license key before you can use it fully. Here are the requirements and steps for installing and activating Bosch ESI Tronic 2.0: -Requirements-To install Bosch ESI Tronic 2.0 on your computer or laptop, you need to meet the following requirements: -
Steps-To install Bosch ESI Tronic 2.0 on your computer or laptop, follow these steps: -
How to use Bosch ESI Tronic 2.0 for vehicle diagnosis and repair?-Bosch ESI Tronic 2.0 is designed to help you diagnose and repair vehicles easily and accurately. The software has various functions and modules that cover different aspects of vehicle diagnosis and repair. Here are some of the main functions and modules of Bosch ESI Tronic 2.0: -Troubleshooting and fault codes-This function allows you to read and clear fault codes from various control units in your vehicle. You can also view actual values, perform actuator tests, adjust basic settings, code and program control units, calibrate sensors, etc., depending on the vehicle model and system. -To use this function: -
Maintenance and service schedules-This function allows you to access and follow Maintenance and service schedules-This function allows you to access and follow the recommended maintenance and service intervals for different vehicles. You can also reset the service indicators after performing the required service tasks. -To use this function: -
Wiring diagrams and schematics-This function allows you to view and print wiring diagrams and schematics for various systems and components in your vehicle. You can also zoom in and out, highlight, search, and navigate through the diagrams and schematics. -To use this function: -
How to update Bosch ESI Tronic 2.0 online?-Bosch ESI Tronic 2.0 is an online diagnostic software that requires regular updates to keep up with the latest vehicle models, systems, components, functions, news, etc. Updating Bosch ESI Tronic 2.0 online has many benefits, such as: -
To update Bosch ESI Tronic 2.0 online, you need an internet connection and a valid license key. Here is the process of updating Bosch ESI Tronic 2.0 online: -
News and new features-To find out the latest news and new features of Bosch ESI Tronic 2.0 online, you can use the following functions: -
Online support and feedback-If you have any questions, problems, or feedback about Bosch ESI Tronic 2.0 online, you can use the following functions: -
How to get a Bosch ESI Tronic 2.0 key generator?-A key generator is a software program that generates random license keys for activating a software product without paying for it. A key generator is usually used by people who want to use a software product for free or who cannot afford to buy a license key legally. A Bosch ESI Tronic 2.0 key generator is a key generator that generates license keys for activating Bosch ESI Tronic 2.0 without buying it from Bosch. A Bosch ESI Tronic 2.0 key generator may seem like an attractive option for some people who want to use Bosch ESI Tronic 2.0 without paying for it. However, there are many advantages and disadvantages of using a key generator for activating Bosch ESI Tronic 2.0. Here are some of them: -Advantages and disadvantages of using a key generator-Legal and ethical issues-The most obvious disadvantage of using a key generator for activating Bosch ESI Tronic 2.0 is that it is illegal and unethical. Using a key generator is considered as piracy, which is a form of theft. Piracy violates the intellectual property rights of Bosch, which is the creator and owner of Bosch ESI Tronic 2.0. Piracy also harms the legitimate customers of Bosch, who pay for their license keys legally. Piracy reduces the revenue of Bosch, which affects its ability to invest in research, development, innovation, quality, customer service, etc. Piracy also exposes the users of key generators to legal risks and consequences. Bosch may detect the use of key generators by monitoring its online activation system. Bosch may also take legal action against the users of key generators by suing them for damages, fines, penalties, etc. Using a key generator is not only illegal but also unethical. Using a key generator is unfair to Bosch, which invests time, money, and effort in creating and maintaining Bosch ESI Tronic 2.0. Using a key generator is also unfair to other users of Bosch ESI Tronic 2.0, who pay for their license keys legally. Using a key generator is dishonest and disrespectful to Bosch, which provides a valuable service to its customers by offering them a high-quality diagnostic software. Using a key generator is also dishonest and disrespectful to oneself, as it shows a lack of integrity, responsibility, and professionalism. Therefore, using a key generator for activating Bosch ESI Tronic 2.0 is not advisable from a legal and ethical point of view. -Quality and reliability issues-Another disadvantage of using a key generator for activating Bosch ESI Tronic 2.0 is that it may compromise the quality and reliability of the software. Using a key generator may cause problems such as: - The software may not work properly or at all. - The software may crash or freeze frequently. - The software may contain errors or bugs that affect its performance and functionality. - The software may be incompatible with some vehicles, systems, components, functions, etc. - The software may be outdated or missing some features or information. - The software may compromise your personal data or privacy by sending it to unknown third parties. Using a key generator may also prevent you from accessing the online features and benefits of Bosch ESI Tronic 2.0, such as: - Online updates that keep the software up to date with the latest vehicle models, systems, components, functions, news, etc. - Online support that allows you to contact Bosch customer service directly from the software. - Online feedback that allows you to provide your suggestions and opinions on the software. Therefore, using a key generator for activating Bosch ESI Tronic 2.0 may not guarantee the quality and reliability of the software. -Where to find a Bosch ESI Tronic 2.0 key generator?-If you still want to use a key generator for activating Bosch ESI Tronic 2.0, despite the disadvantages and risks mentioned above, you may wonder where to find one. There are many sources where you can find a key generator online or offline, such as: - Websites that offer key generators or links to them for free or for a fee. - Forums that discuss key generators or share them among users. - Torrents that allow users to download key generators or other pirated software. - CDs or DVDs that contain key generators or other pirated software. However, finding a key generator is not easy or safe. You need to be careful and cautious when looking for a key generator, as there are many scams, viruses, malware, and other threats that may harm your computer or yourself. Here are some tips and precautions for finding a key generator: -Trusted websites and forums-Not all websites and forums that offer key generators are trustworthy or reputable. Some of them may be fake, fraudulent, or malicious. They may trick you into downloading viruses, malware, spyware, etc., instead of key generators. They may also ask you for personal information, such as your name, email address, credit card number, etc., and use it for identity theft or other illegal purposes. To avoid these scams and threats, you should only visit trusted websites and forums that have good reviews, ratings, feedbacks, etc., from other users. You should also check the domain name, URL, security certificate, etc., of the website or forum before visiting it. You should also scan the downloaded file with an antivirus program before opening it. -Cautionary measures and precautions-Even if you find a trusted website or forum that offers a key generator, you should still take some cautionary measures and precautions before using it. Some of these measures and precautions are: - Backup your computer data before using a key generator. - Disable your internet connection before using a key generator. - Use a virtual machine or sandbox to run a key generator. - Use a firewall or antivirus program to block any unwanted connections or activities from a key generator. - Do not share your license key with anyone else. - Do not update your software online after using a key generator. These measures and precautions may help you reduce the risks and damages that may result from using a key generator. -Conclusion-Bosch ESI Tronic 2.0 is a powerful and comprehensive diagnostic software that can help you diagnose and repair vehicles quickly, efficiently, and effectively. It has many features and functions that cover different aspects of vehicle diagnosis and repair. It also has online features and benefits that keep the software up to date and provide support and feedback. To use Bosch ESI Tronic 2.0, you need to install and activate it with a valid license key. You can get a license key from Bosch by registering your product online or by contacting your local dealer. Alternatively, you can use a key generator to generate a license key for activating Bosch ESI Tronic 2.0 without paying for it. However, using a key generator has many disadvantages and risks, such as legal and ethical issues, quality and reliability issues, scams, viruses, malware, and other threats. Therefore, it is advisable to use Bosch ESI Tronic 2.0 legally and ethically, by buying a license key from Bosch or its authorized dealers. -FAQs-Here are some frequently asked questions about Bosch ESI Tronic 2.0 and key generators: -
- - \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Coat Of Arms Design Studio Pro Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Coat Of Arms Design Studio Pro Torrent.md deleted file mode 100644 index aae4bac2739d9a947b41efce5d5cd17f575dc963..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Coat Of Arms Design Studio Pro Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ - coat of arms design studio pro torrentDOWNLOAD –––––>>> https://imgfil.com/2uxZOP - -Hi All, Was going to download the free version of Coat of Arms Design Studio but the links on their page are just erroring, does anyone have a ... 1fdad05405 - - - diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/FeatureCAM 2019 Xforce Keygen 64 Bits _BEST_.md b/spaces/1gistliPinn/ChatGPT4/Examples/FeatureCAM 2019 Xforce Keygen 64 Bits _BEST_.md deleted file mode 100644 index 598b0f48cd35cf43312d63ca0c7dd2982b175622..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/FeatureCAM 2019 Xforce Keygen 64 Bits _BEST_.md +++ /dev/null @@ -1,42 +0,0 @@ - FeatureCAM 2019 xforce keygen 64 bitsDownload File ✦✦✦ https://imgfil.com/2uxYCf - -The result is awesome. - -3. Video Effects Free - -Video effects is a great tool for create video. You can add effect like Instagram for your video. Video effects is free to use. - -4. Vine - -Vine is video sharing app. You can share video in 6 seconds. You can add emojis and choose rich video style. It is short video app, you need to share it on your favorite social media. Vine for Android. - -5. LoopPeer - -LoopPeer is video sharing app. You can download video on mobile, and share the video on your social media. - -6. Super Fast Mode - -Super Fast Mode is video editing app. With Super Fast Mode, you can edit videos. You can trim, edit your video, add subtitle and share your videos. - -7. PicCollage - -PicCollage is best photo editing and video editor app. You can trim your video, add music, photo, add animation. PicCollage can make photo collages. - -8. Skype Video Chat - -Skype Video Chat is video editing app. You can record your video and share your video on social media. The result is awesome. - -9. Instagram Video Editor - -Instagram Video Editor is best video editor. You can edit your videos, trim your videos, add subtitle, add photo, add gif and send to your friends on Facebook or Instagram. - -10. PhotoCollage - -PhotoCollage is photo editor, photo collage maker. You can create collages, add photo, add video, add text. The result is awesome.Flat panel liquid crystal display devices have been used in flat panel display devices of small-size products such as mobile phones. In recent years, however, the demand for display devices of higher resolution has been increasing as a result of recent improvements in the performance of personal computers. Under the circumstances, large screen display devices having a diagonal length of 40 inches and more have been developed (see, for example, Patent Document 1). - -The liquid crystal display device, as one type of the flat panel display devices, basically comprises a liquid crystal layer, two substrates and a backlight. - -The liquid crystal layer is formed of an extremely thin liquid crystal layer having thicknesses of 1 μm or less. On the other hand, the two substrates, on the liquid crystal layer, are formed of glass substrates of relatively thick thicknesses. These glass substrates 4fefd39f24 - - - diff --git a/spaces/1line/AutoGPT/autogpt/commands/web_playwright.py b/spaces/1line/AutoGPT/autogpt/commands/web_playwright.py deleted file mode 100644 index 4e388ded203cefb5e24f9116f7fe5b8a94893413..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/commands/web_playwright.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Web scraping commands using Playwright""" -from __future__ import annotations - -try: - from playwright.sync_api import sync_playwright -except ImportError: - print( - "Playwright not installed. Please install it with 'pip install playwright' to use." - ) -from bs4 import BeautifulSoup - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - - -def scrape_text(url: str) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - except Exception as e: - text = f"Error: {str(e)}" - - finally: - browser.close() - - return text - - -def scrape_links(url: str) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - Union[str, List[str]]: The scraped links - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - formatted_links = format_hyperlinks(hyperlinks) - - except Exception as e: - formatted_links = f"Error: {str(e)}" - - finally: - browser.close() - - return formatted_links diff --git a/spaces/1line/AutoGPT/autogpt/config/ai_config.py b/spaces/1line/AutoGPT/autogpt/config/ai_config.py deleted file mode 100644 index d50c30beee9dc8009f63415378ae1c6a399f0037..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/config/ai_config.py +++ /dev/null @@ -1,121 +0,0 @@ -# sourcery skip: do-not-use-staticmethod -""" -A module that contains the AIConfig class object that contains the configuration -""" -from __future__ import annotations - -import os -from typing import Type - -import yaml - - -class AIConfig: - """ - A class object that contains the configuration information for the AI - - Attributes: - ai_name (str): The name of the AI. - ai_role (str): The description of the AI's role. - ai_goals (list): The list of objectives the AI is supposed to complete. - """ - - def __init__( - self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None - ) -> None: - """ - Initialize a class instance - - Parameters: - ai_name (str): The name of the AI. - ai_role (str): The description of the AI's role. - ai_goals (list): The list of objectives the AI is supposed to complete. - Returns: - None - """ - if ai_goals is None: - ai_goals = [] - self.ai_name = ai_name - self.ai_role = ai_role - self.ai_goals = ai_goals - - # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml") - - @staticmethod - def load(config_file: str = SAVE_FILE) -> "AIConfig": - """ - Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from - yaml file if yaml file exists, - else returns class with no parameters. - - Parameters: - config_file (int): The path to the config yaml file. - DEFAULT: "../ai_settings.yaml" - - Returns: - cls (object): An instance of given cls object - """ - - try: - with open(config_file, encoding="utf-8") as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} - - ai_name = config_params.get("ai_name", "") - ai_role = config_params.get("ai_role", "") - ai_goals = config_params.get("ai_goals", []) - # type: Type[AIConfig] - return AIConfig(ai_name, ai_role, ai_goals) - - def save(self, config_file: str = SAVE_FILE) -> None: - """ - Saves the class parameters to the specified file yaml file path as a yaml file. - - Parameters: - config_file(str): The path to the config yaml file. - DEFAULT: "../ai_settings.yaml" - - Returns: - None - """ - - config = { - "ai_name": self.ai_name, - "ai_role": self.ai_role, - "ai_goals": self.ai_goals, - } - with open(config_file, "w", encoding="utf-8") as file: - yaml.dump(config, file, allow_unicode=True) - - def construct_full_prompt(self) -> str: - """ - Returns a prompt to the user with the class information in an organized fashion. - - Parameters: - None - - Returns: - full_prompt (str): A string containing the initial prompt for the user - including the ai_name, ai_role and ai_goals. - """ - - prompt_start = ( - "Your decisions must always be made independently without" - " seeking user assistance. Play to your strengths as an LLM and pursue" - " simple strategies with no legal complications." - "" - ) - - from autogpt.prompt import get_prompt - - # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) - for i, goal in enumerate(self.ai_goals): - full_prompt += f"{i+1}. {goal}\n" - - full_prompt += f"\n\n{get_prompt()}" - return full_prompt diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APKPure Presents Red WhatsApp APK Download for Android Devices.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APKPure Presents Red WhatsApp APK Download for Android Devices.md deleted file mode 100644 index 4ee3ba10e24a2b55e985814f769db01e2a861e3f..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APKPure Presents Red WhatsApp APK Download for Android Devices.md +++ /dev/null @@ -1,138 +0,0 @@ - - Red WhatsApp APK Download Apkpure: What You Need to Know-WhatsApp is one of the most popular messaging apps in the world, with over 2 billion monthly active users. However, some people are not satisfied with the official WhatsApp app and look for modified versions that offer more features and customization options. One of these mods is Red WhatsApp APK, which claims to be a better and more stylish version of WhatsApp. But is it safe and reliable? How can you download it from Apkpure? And are there any alternatives to Red WhatsApp APK? In this article, we will answer these questions and more. -What is Red WhatsApp APK?-Red WhatsApp APK is a modded version of WhatsApp that changes the color scheme of the app to red and black. It also adds some extra features that are not available in the official WhatsApp app, such as: -red whatsapp apk download apkpureDownload File ⚡ https://urlin.us/2uSSn8 - Features of Red WhatsApp APK-
Risks of Red WhatsApp APK-While Red WhatsApp APK may sound tempting, it also comes with some risks that you should be aware of before downloading it. These include: -
How to Download Red WhatsApp APK from Apkpure-If you still want to try Red WhatsApp APK despite the risks, you can download it from Apkpure, which is a third-party app store that hosts various Android apps and games. Here are the steps to download and install Red WhatsApp APK from Apkpure: -Steps to Download and Install Red WhatsApp APK-
How to Use Red WhatsApp APK-Using Red WhatsApp APK is similar to using the official WhatsApp app, with some minor differences. Here are some tips on how to use Red WhatsApp APK: -
Alternatives to Red WhatsApp APK-If you are looking for other ways to enhance your WhatsApp experience without risking your account or device, you can try some of these alternatives to Red WhatsApp APK: -Telegram Messenger-Telegram is a cloud-based messaging app that offers many features that WhatsApp does not, such as: -
You can download Telegram from the Google Play Store or from Telegram.org. -Signal Private Messenger-Signal is a privacy-focused messaging app that uses end-to-end encryption for all your communications. It also offers some features that WhatsApp does not, such as: -
You can download Signal from the Google Play Store or from Signal.org. -red whatsapp plus apk download apkpure Other WhatsApp Mods-If you still want to use a modded version of WhatsApp, you can try some of these other WhatsApp mods that are more popular and updated than Red WhatsApp APK: -
Conclusion-Red WhatsApp APK is a modded version of WhatsApp that offers some extra features and customization options, but it also comes with some risks and drawbacks. If you want to download it from Apkpure, you need to follow some steps and enable unknown sources on your device. However, you may also consider some alternatives to Red WhatsApp APK, such as Telegram, Signal, or other WhatsApp mods that are more secure and updated. Ultimately, the choice is yours, but you should be careful and responsible when using any modded app. -FAQs-What is the difference between Red WhatsApp APK and WhatsApp Plus?-Red WhatsApp APK and WhatsApp Plus are both modded versions of WhatsApp that offer similar features and customization options. However, Red WhatsApp APK has a red and black color scheme, while WhatsApp Plus has a blue and white color scheme. Also, Red WhatsApp APK is not updated as frequently as WhatsApp Plus, which may make it more prone to bugs and errors. -Is Red WhatsApp APK legal?-Red WhatsApp APK is not legal, as it violates the terms of service of WhatsApp and infringes on its intellectual property rights. Using Red WhatsApp APK may get your account banned or suspended by WhatsApp. Also, downloading Red WhatsApp APK from Apkpure or any other third-party app store may expose your device to malware or spyware. -Can I backup my chats from Red WhatsApp APK to Google Drive?-No, you cannot backup your chats from Red WhatsApp APK to Google Drive, as Google Drive does not support modded apps. If you want to backup your chats from Red WhatsApp APK, you need to use a local backup option or a third-party app such as Titanium Backup. -Can I use Red WhatsApp APK on iOS devices?-No, you cannot use Red WhatsApp APK on iOS devices, as it is only compatible with Android devices. If you want to use a modded version of WhatsApp on iOS devices, you need to jailbreak your device and use a tweak such as Watusi or WhatsApp++. -How can I update Red WhatsApp APK?-To update Red WhatsApp APK, you need to visit Apkpure or any other website that hosts the latest version of the app and download it manually. You cannot update Red WhatsApp APK from the app itself or from the Google Play Store. 197e85843d- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Angry Birds Classic The Game that Made History.md b/spaces/1phancelerku/anime-remove-background/Angry Birds Classic The Game that Made History.md deleted file mode 100644 index bc43901e4d1683b40c828624df11932cdb7277ae..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Angry Birds Classic The Game that Made History.md +++ /dev/null @@ -1,83 +0,0 @@ - - Angry Birds Classic: A Fun and Addictive Game for Everyone-If you are looking for a casual and entertaining game that will keep you hooked for hours, you might want to check out Angry Birds Classic. This is the original game that started the global phenomenon of Angry Birds, a series of games that feature colorful birds who try to save their eggs from greedy pigs. -Angry Birds Classic was first released in 2009 for iOS devices, and since then it has been downloaded over 2 billion times across all platforms. The game has been praised for its fun gameplay, comical style, and low price. It has also spawned many spin-offs, sequels, movies, and merchandise featuring its characters. -angry birds classic download app storeDownload Zip === https://jinyurl.com/2uNNMx - Features-The gameplay of Angry Birds Classic is simple but challenging. You use a slingshot to launch the birds at the pigs' fortresses, which are made of various materials such as wood, glass, and stone. You have to use logic, skill, and force to destroy all the pigs on each level. -The game features 15 original episodes with over 680 levels to play. Each episode has a different theme and introduces new types of birds with unique abilities. For example, the yellow bird can speed up in mid-air, the black bird can explode like a bomb, and the white bird can drop egg bombs. -You can also compete against other players in the Mighty League, where you can earn coins and power-ups by playing daily challenges. Power-ups can boost your birds' destructive strength by giving them extra speed, size, or aim. You can also use the Mighty Eagle, a super-powered bird that can clear any level with ease. -Platforms-Angry Birds Classic is available for download on various devices, including smartphones, tablets, computers, and consoles. You can find it on the App Store for iOS devices , Google Play Store for Android devices , Amazon Appstore for Kindle Fire devices , and Windows Store for Windows devices . You can also play it on your web browser using Google Chrome or Facebook . -The game is free to download and play on most platforms, but it may require internet connectivity and data charges may apply. The game may also include in-app purchases, advertisements, and links to other websites or social networks. -How to download Angry Birds Classic for free on iOS Tips and tricks-If you want to master Angry Birds Classic and get three stars on every level, you may need some tips and tricks to help you out. Here are some of them: -
Reviews-Angry Birds Classic has received mostly positive reviews from critics and players alike. The game has a rating of 4.5 out of 5 stars on the App Store , 4.4 out of 5 stars on the Google Play Store , and 4.6 out of 5 stars on the Amazon Appstore . -Some of the praises for the game are: --- -- -- Conclusion-Angry Birds Classic is a game that has earned its place in the history of mobile gaming. It is a game that appeals to people of all ages and backgrounds, with its simple yet addictive gameplay, charming style, and low price. It is a game that you can download and play on almost any device, whether you are at home or on the go. -If you have not played Angry Birds Classic yet, you are missing out on a lot of fun and entertainment. You can download it for free from your preferred app store or play it online using your web browser. You will not regret it. -So what are you waiting for? Grab your slingshot and join the Angry Birds in their quest to defeat the pigs and save their eggs. You will have a blast! -FAQs-What is the difference between Angry Birds Classic and Angry Birds 2?-Angry Birds 2 is the sequel to Angry Birds Classic, released in 2015. It features new graphics, levels, birds, pigs, power-ups, spells, bosses, and multiplayer modes. However, it also includes more in-app purchases, advertisements, lives, and randomness than Angry Birds Classic. -How many Angry Birds games are there?-There are over 20 Angry Birds games as of 2021, including spin-offs, sequels, collaborations, and compilations. Some of the most popular ones are Angry Birds Seasons, Angry Birds Rio, Angry Birds Space, Angry Birds Star Wars, Angry Birds Go!, Angry Birds Epic, Angry Birds Transformers, Angry Birds Friends, Angry Birds Match, and Angry Birds Dream Blast. -Are there any movies or shows based on Angry Birds?-Yes, there are two animated movies based on Angry Birds: The Angry Birds Movie (2016) and The Angry Birds Movie 2 (2019). There are also several animated shows based on Angry Birds: Angry Birds Toons (2013-2016), Piggy Tales (2014-2018), Stella (2014-2016), Angry Birds Blues (2017), and Angry Birds MakerSpace (2019-present). -Who created Angry Birds?-Angry Birds was created by Rovio Entertainment, a Finnish video game company founded in 2003. The original idea for the game was inspired by a sketch of stylized wingless birds by Jaakko Iisalo, a senior game designer at Rovio. -Why are the birds angry?-The birds are angry because the pigs stole their eggs and want to eat them. The birds want to get their eggs back and stop the pigs from eating them. The birds use their slingshot and their special abilities to attack the pigs and their structures. 401be4b1e0- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Smash the Dummy Mod APK and Enjoy Ragdoll Physics and Stress Relief.md b/spaces/1phancelerku/anime-remove-background/Download Smash the Dummy Mod APK and Enjoy Ragdoll Physics and Stress Relief.md deleted file mode 100644 index 512a6d2accc0a8aac08d80bf34364d57e0aa4c59..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Smash the Dummy Mod APK and Enjoy Ragdoll Physics and Stress Relief.md +++ /dev/null @@ -1,101 +0,0 @@ - - Smash the Dummy Mod Apk: A Fun and Stress-Relieving Game-Have you ever felt stressed, angry, or frustrated and wished you could vent your emotions on something or someone? Well, now you can with smash the dummy mod apk, a fun and stress-relieving game that lets you punch, shoot, and kick a virtual dummy or voodoo doll. Smash the dummy mod apk is a modified version of the original game, Smash the Dummy: Beat Boss Kick Buddy Ragdoll Game, that gives you unlimited resources and features to enjoy. In this article, we will tell you what smash the dummy mod apk is, why it is popular, how to download and install it, how to play it, what are its benefits and drawbacks, and our final verdict on it. -smash the dummy mod apkDownload Zip >>>>> https://jinyurl.com/2uNPdk - How to Download and Install Smash the Dummy Mod Apk-If you want to play smash the dummy mod apk on your Android device, you will need to follow these steps: -
How to Play Smash the Dummy Mod Apk-Choose Your Dummy and Weapon-When you start playing smash the dummy mod apk, you will be able to choose from different types of dummies and weapons to smash them. You can select from various categories such as animals, zombies, superheroes, celebrities , and more. You can also choose from different weapons such as guns, knives, hammers, rockets, grenades, and more. Each dummy and weapon has its own characteristics and effects, so you can experiment with different combinations and see what happens. -Smash, Shoot, and Kick the Dummy-Once you have chosen your dummy and weapon, you can start smashing, shooting, and kicking the dummy. You can use various gestures and actions to inflict damage on the dummy, such as tapping, swiping, dragging, pinching, and shaking. You can also use the buttons on the screen to perform different actions, such as throwing the dummy, changing the weapon, or activating special features. The more you smash the dummy, the more damage you will cause and the more fun you will have. -Earn Coins and Diamonds-As you play smash the dummy mod apk, you will also earn coins and diamonds by smashing the dummy and completing missions. Coins and diamonds are the in-game currencies that you can use to unlock new dummies and weapons. You can also use them to upgrade your weapons and increase their power and effects. You can earn coins and diamonds by playing the game regularly, watching ads, or using the modded features of the game. -Unlock New Dummies and Weapons-With the coins and diamonds you earn, you can unlock new dummies and weapons to smash them. You can access the shop from the main menu and browse through different categories of dummies and weapons. You can also see their prices and descriptions before buying them. Some of the dummies and weapons are locked until you reach a certain level or complete a certain mission. You can also use the modded features of the game to unlock all the dummies and weapons for free. -* Smash the dummy ragdoll game mod apk Benefits of Playing Smash the Dummy Mod Apk-Relieve Stress and Anger-One of the main benefits of playing smash the dummy mod apk is that it can help you relieve stress and anger. Sometimes, life can be stressful and frustrating, and you may feel like taking out your emotions on something or someone. However, doing so in real life can have negative consequences for yourself and others. That's why playing smash the dummy mod apk can be a safe and fun way to vent your emotions and have fun. You can smash the dummy as much as you want without hurting anyone or anything. You can also choose a dummy that resembles someone or something that annoys you or makes you angry, such as your boss, your ex, or a politician. -Improve Your Reflexes and Coordination-Another benefit of playing smash the dummy mod apk is that it can improve your reflexes and coordination. Playing smash the dummy mod apk requires you to use your fingers to perform various gestures and actions on the screen. This can enhance your hand-eye coordination and reaction time. You can also challenge yourself by trying to smash the dummy as fast as possible or by using different weapons and features. Playing smash the dummy mod apk can also improve your concentration and focus as you try to smash the dummy without missing or getting distracted. -Enjoy Unlimited Resources and Features-A third benefit of playing smash the dummy mod apk is that it can give you access to unlimited resources and features that are not available in the original version of the game. With smash the dummy mod apk, you can enjoy unlimited coins, diamonds, dummies, weapons, and other features that can make your game more enjoyable. You can unlock all the dummies and weapons for free and use them without any limitations. You can also use the modded features of the game to activate special effects, such as slow motion, ragdoll physics, explosions, and more. Playing smash the dummy mod apk can make your game more fun and exciting. -Drawbacks of Playing Smash the Dummy Mod Apk-Risk of Malware and Viruses-One of the main drawbacks of playing smash the dummy mod apk is that it can expose your device to malware and viruses that can harm your data and privacy. Since smash the dummy mod apk is not from the official Google Play Store, you will need to download and install it from unknown sources that may not be safe or reliable. Some of these sources may contain malicious files or codes that can infect your device and steal your personal information, such as your contacts, photos, messages, passwords, and more. You may also experience unwanted ads, pop-ups, redirects, or crashes on your device. Therefore, you should be careful when downloading and installing smash the dummy mod apk and use a good antivirus software to scan your device regularly. -Risk of Ban and Suspension-Another drawback of playing smash the dummy mod apk is that it can violate the terms and conditions of the original game developer and result in your account being banned or suspended. Since smash the dummy mod apk is a modified version of the original game, Smash the Dummy: Beat Boss Kick Buddy Ragdoll Game, it can give you an unfair advantage over other players who play the original game. This can affect the balance and fairness of the game and make it less enjoyable for others. The original game developer may detect your use of smash the dummy mod apk and ban or suspend your account for cheating or hacking. You may also lose your progress, achievements, and rewards in the game. Therefore, you should be aware of the risks and consequences of playing smash the dummy mod apk and respect the rules and rights of the original game developer. -Risk of Addiction and Violence-A third drawback of playing smash the dummy mod apk is that it can become addictive and influence your behavior and attitude towards violence in real life. Playing smash the dummy mod apk can be very entertaining and satisfying, but it can also make you spend too much time and energy on it. You may neglect your other responsibilities, such as your work, school, family, or friends. You may also become obsessed with smashing the dummy and forget about other hobbies or interests. Playing smash the dummy mod apk can also affect your mental health and well-being, as you may develop aggression, hostility, or desensitization towards violence. You may start to enjoy hurting or harming others, even if they are virtual or fictional. You may also lose empathy or compassion for others who suffer from violence in real life. Therefore, you should play smash the dummy mod apk in moderation and balance it with other activities that are healthy and positive. -Conclusion-Smash the dummy mod apk is a fun and stress-relieving game that lets you punch, shoot, and kick a virtual dummy or voodoo doll. It is a modified version of the original game that gives you unlimited resources and features to enjoy. However, it also has some drawbacks that you should be aware of before playing it. In this article, we have explained what smash the dummy mod apk is, why it is popular, how to download and install it, how to play it, what are its benefits and drawbacks, and our final verdict on it. -In our opinion, smash the dummy mod apk is a good game to play if you want to relieve stress and anger, improve your reflexes and coordination , and enjoy unlimited resources and features. However, you should also be careful of the risks of malware and viruses, ban and suspension, and addiction and violence. You should also respect the original game developer and play the game in moderation and balance. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to share them in the comments section below. -FAQs-Here are some of the frequently asked questions about smash the dummy mod apk: -
The main difference between smash the dummy mod apk and the original game is that the modded version gives you unlimited coins, diamonds, dummies, weapons, and other features that are not available in the original game. You can also use the modded features to activate special effects, such as slow motion, ragdoll physics, explosions, and more. -Smash the dummy mod apk is not from the official Google Play Store, so you will need to download and install it from unknown sources that may not be safe or reliable. Some of these sources may contain malicious files or codes that can infect your device and steal your personal information. Therefore, you should be careful when downloading and installing smash the dummy mod apk and use a good antivirus software to scan your device regularly. -No, smash the dummy mod apk is not an online game, so you cannot play it with other players. It is a single-player game that you can play offline on your device. However, you may need an internet connection to access some of the features of the game, such as watching ads or downloading new dummies and weapons. -If you want to update smash the dummy mod apk to the latest version, you will need to visit the website where you downloaded it from and check if there is a new version available. If there is, you can download and install it on your device following the same steps as before. However, you may lose your progress and data in the game if you update it, so you may want to back up your files before doing so. -If you want to uninstall smash the dummy mod apk from your device, you can follow these steps: -
- - \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Free Download M-PESA App and Send Money with Gifs Description and Profile Picture.md b/spaces/1phancelerku/anime-remove-background/Free Download M-PESA App and Send Money with Gifs Description and Profile Picture.md deleted file mode 100644 index 2c3926fc53923905547ead9f3d8365d8246a016c..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Free Download M-PESA App and Send Money with Gifs Description and Profile Picture.md +++ /dev/null @@ -1,114 +0,0 @@ - - Free Download M-Pesa App: How to Enjoy the Benefits of Mobile Money Transfer-Do you want to make your life easier by managing your finances on your phone? Do you want to send and receive money, pay bills, buy goods and services, and more with just a few taps? Do you want to enjoy convenience, security, affordability, and accessibility with mobile money transfer? If you answered yes to any of these questions, then you should download the M-Pesa app today. -What is M-Pesa and why should you use it?-M-Pesa is a mobile money service that allows you to send and receive money, pay bills, buy goods and services, and more using your phone. It is operated by Safaricom, the leading mobile network operator in Kenya. M-Pesa has over 40 million users in Kenya and other countries such as Tanzania, Lesotho, Mozambique, Ghana, Egypt, India, Romania, Albania, South Africa. -free download m pesa appDownload ===> https://jinyurl.com/2uNKbe - M-Pesa has many benefits such as convenience, security, affordability, and accessibility-Some of the benefits of using M-Pesa are: -
With M-Pesa, you can enjoy the benefits of mobile money transfer without any hassle. -How to download and install the M-Pesa app on your phone?-If you want to enjoy the benefits of M-Pesa, you need to download and install the M-Pesa app on your phone. The M-Pesa app is available for both Android and iOS devices. Here are the steps to download and install the app: -The M-Pesa app is available for both Android and iOS devices-You can download the app from the Google Play Store or the Apple Store for free. You can also scan the QR code below to download the app: -
You need to have an active M-Pesa account and a registered SIM card to use the app-If you don't have an M-Pesa account, you need to register for one at any Safaricom shop or agent. You will need to provide your ID and phone number. You will also receive a PIN that you will use to access your account. -free download m pesa app for android If you already have an M-Pesa account, you need to make sure that your SIM card is registered and active. You can check your SIM registration status by dialing *234# on your phone. -You can log in to the app using your M-Pesa PIN or biometric authentication-Once you have downloaded and installed the app, you can open it and log in using your M-Pesa PIN or biometric authentication. Biometric authentication is a feature that allows you to use your fingerprint or face recognition to access your account. You can enable this feature in the settings of the app. -After logging in, you will see your account balance and a menu of options that you can use to perform various transactions. How to use the M-Pesa app to perform various transactions?-The M-Pesa app has a simple and user-friendly interface that allows you to access all the core M-Pesa features. You can send money, buy goods, pay bills, withdraw cash, buy airtime, and more using the app. You can also access other features such as M-Pesa Global, Pochi la Biashara, Due Bills, Buy Bundles, and Mini Apps. Here are some of the ways you can use the M-Pesa app to perform various transactions: -You can send money, buy goods, pay bills, withdraw cash, buy airtime, and more using the app-To send money, you can select the Send Money option from the menu and enter the recipient's phone number or name from your contacts. You can also scan or generate a QR code to send money. You can then enter the amount and confirm with your PIN or biometric authentication. -To buy goods, you can select the Lipa Na M-Pesa option from the menu and enter the till number or name of the merchant. You can also scan or generate a QR code to buy goods. You can then enter the amount and confirm with your PIN or biometric authentication. -To pay bills, you can select the Pay Bill option from the menu and enter the business number or name of the biller. You can also scan or generate a QR code to pay bills. You can then enter the account number and amount and confirm with your PIN or biometric authentication. -To withdraw cash, you can select the Withdraw Cash option from the menu and enter the agent number or name of the agent. You can also scan or generate a QR code to withdraw cash. You can then enter the amount and confirm with your PIN or biometric authentication. -To buy airtime, you can select the Buy Airtime option from the menu and enter your phone number or name from your contacts. You can then enter the amount and confirm with your PIN or biometric authentication. -You can also access other features such as M-Pesa Global, Pochi la Biashara, Due Bills, Buy Bundles, and Mini Apps-M-Pesa Global is a feature that allows you to send and receive money across different countries and currencies. You can select the M-Pesa Global option from the menu and choose whether you want to send money abroad or receive money from abroad. You can then follow the instructions on the screen to complete your transaction. -Pochi la Biashara is a feature that allows you to receive payments from customers without revealing your personal details. You can select the Pochi la Biashara option from the menu and create your own Pochi la Biashara account. You can then share your Pochi la Biashara name with your customers and receive payments directly to your account. -Due Bills is a feature that allows you to view and pay your pending bills in one place. You can select the Due Bills option from the menu and see all your due bills from different billers. You can then choose which bills you want to pay and confirm with your PIN or biometric authentication. -Buy Bundles is a feature that allows you to buy data, voice, SMS, and other bundles using your M-Pesa balance. You can select the Buy Bundles option from the menu and choose which bundle you want to buy. You can then confirm with your PIN or biometric authentication. -Mini Apps is a feature that allows you to access various apps such as travel, lifestyle, utility, and more without having to download them. You can select the Mini Apps option from the menu and browse through different categories of apps. You can then choose which app you want to use and enjoy its services. How to track your spending and transactions in real-time using the My Spend and Statement features-The M-Pesa app also allows you to track your spending and transactions in real-time using the My Spend and Statement features. These features help you to manage your finances and budget better. Here is how you can use them: -You can track your spending and transactions in real-time using the My Spend feature-The My Spend feature shows you how much you have spent on different categories such as food, transport, entertainment, and more. You can also see how much you have saved, invested, or donated. You can access the My Spend feature by selecting the My Spend option from the menu. You can then see a graphical representation of your spending habits and trends. You can also filter your spending by date, category, or amount. -You can track your spending and transactions in real-time using the Statement feature-The Statement feature shows you a detailed history of all your transactions such as sending money, buying goods, paying bills, withdrawing cash, buying airtime, and more. You can also see the status, date, time, amount, and fee of each transaction. You can access the Statement feature by selecting the Statement option from the menu. You can then see a list of all your transactions and search for a specific transaction by date, amount, or description. -Conclusion-The M-Pesa app is a great way to enjoy the benefits of mobile money transfer. The app is free, easy to use, secure, and offers many features and services. You can download the app today and start your journey to convenience with M-Pesa. -FAQs-Here are some of the frequently asked questions about the M-Pesa app: -
- - \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/models/unet_1d.py b/spaces/1toTree/lora_test/ppdiffusers/models/unet_1d.py deleted file mode 100644 index 864cbf089cefb893e0d8274cc58d3a3ddd3a634b..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/models/unet_1d.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import paddle -import paddle.nn as nn - -from ..configuration_utils import ConfigMixin, register_to_config -from ..modeling_utils import ModelMixin -from ..utils import BaseOutput -from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps -from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block - - -@dataclass -class UNet1DOutput(BaseOutput): - """ - Args: - sample (`paddle.Tensor` of shape `(batch_size, num_channels, sample_size)`): - Hidden states output. Output of last layer of model. - """ - - sample: paddle.Tensor - - -class UNet1DModel(ModelMixin, ConfigMixin): - r""" - UNet1DModel is a 1D UNet model that takes in a noisy sample and a timestep and returns sample shaped output. - - This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library - implements for all the model (such as downloading or saving, etc.) - - Parameters: - sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime. - in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. - out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. - time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. - freq_shift (`float`, *optional*, defaults to 0.0): Frequency shift for fourier time embedding. - flip_sin_to_cos (`bool`, *optional*, defaults to : - obj:`False`): Whether to flip sin to cos for fourier time embedding. - down_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("DownBlock1D", "DownBlock1DNoSkip", "AttnDownBlock1D")`): Tuple of downsample block types. - up_block_types (`Tuple[str]`, *optional*, defaults to : - obj:`("UpBlock1D", "UpBlock1DNoSkip", "AttnUpBlock1D")`): Tuple of upsample block types. - block_out_channels (`Tuple[int]`, *optional*, defaults to : - obj:`(32, 32, 64)`): Tuple of block output channels. - mid_block_type (`str`, *optional*, defaults to "UNetMidBlock1D"): block type for middle of UNet. - out_block_type (`str`, *optional*, defaults to `None`): optional output processing of UNet. - act_fn (`str`, *optional*, defaults to None): optional activitation function in UNet blocks. - norm_num_groups (`int`, *optional*, defaults to 8): group norm member count in UNet blocks. - layers_per_block (`int`, *optional*, defaults to 1): added number of layers in a UNet block. - downsample_each_block (`int`, *optional*, defaults to False: - experimental feature for using a UNet without upsampling. - """ - - @register_to_config - def __init__( - self, - sample_size: int = 65536, - sample_rate: Optional[int] = None, - in_channels: int = 2, - out_channels: int = 2, - extra_in_channels: int = 0, - time_embedding_type: str = "fourier", - flip_sin_to_cos: bool = True, - use_timestep_embedding: bool = False, - freq_shift: float = 0.0, - down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), - up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), - mid_block_type: Tuple[str] = "UNetMidBlock1D", - out_block_type: str = None, - block_out_channels: Tuple[int] = (32, 32, 64), - act_fn: str = None, - norm_num_groups: int = 8, - layers_per_block: int = 1, - downsample_each_block: bool = False, - ): - super().__init__() - self.sample_size = sample_size - - # time - if time_embedding_type == "fourier": - self.time_proj = GaussianFourierProjection( - embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos - ) - timestep_input_dim = 2 * block_out_channels[0] - elif time_embedding_type == "positional": - self.time_proj = Timesteps( - block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift - ) - timestep_input_dim = block_out_channels[0] - - if use_timestep_embedding: - time_embed_dim = block_out_channels[0] * 4 - self.time_mlp = TimestepEmbedding( - in_channels=timestep_input_dim, - time_embed_dim=time_embed_dim, - act_fn=act_fn, - out_dim=block_out_channels[0], - ) - - self.down_blocks = nn.LayerList([]) - self.mid_block = None - self.up_blocks = nn.LayerList([]) - self.out_block = None - - # down - output_channel = in_channels - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - - if i == 0: - input_channel += extra_in_channels - - is_final_block = i == len(block_out_channels) - 1 - - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - temb_channels=block_out_channels[0], - add_downsample=not is_final_block or downsample_each_block, - ) - self.down_blocks.append(down_block) - - # mid - self.mid_block = get_mid_block( - mid_block_type, - in_channels=block_out_channels[-1], - mid_channels=block_out_channels[-1], - out_channels=block_out_channels[-1], - embed_dim=block_out_channels[0], - num_layers=layers_per_block, - add_downsample=downsample_each_block, - ) - - # up - reversed_block_out_channels = list(reversed(block_out_channels)) - output_channel = reversed_block_out_channels[0] - if out_block_type is None: - final_upsample_channels = out_channels - else: - final_upsample_channels = block_out_channels[0] - - for i, up_block_type in enumerate(up_block_types): - prev_output_channel = output_channel - output_channel = ( - reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels - ) - - is_final_block = i == len(block_out_channels) - 1 - - up_block = get_up_block( - up_block_type, - num_layers=layers_per_block, - in_channels=prev_output_channel, - out_channels=output_channel, - temb_channels=block_out_channels[0], - add_upsample=not is_final_block, - ) - self.up_blocks.append(up_block) - prev_output_channel = output_channel - - # out - num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) - self.out_block = get_out_block( - out_block_type=out_block_type, - num_groups_out=num_groups_out, - embed_dim=block_out_channels[0], - out_channels=out_channels, - act_fn=act_fn, - fc_dim=block_out_channels[-1] // 4, - ) - - def forward( - self, - sample: paddle.Tensor, - timestep: Union[paddle.Tensor, float, int], - return_dict: bool = True, - ) -> Union[UNet1DOutput, Tuple]: - r""" - Args: - sample (`paddle.Tensor`): `(batch_size, sample_size, num_channels)` noisy inputs tensor - timestep (`paddle.Tensor` or `float` or `int): (batch) timesteps - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.unet_1d.UNet1DOutput`] instead of a plain tuple. - - Returns: - [`~models.unet_1d.UNet1DOutput`] or `tuple`: [`~models.unet_1d.UNet1DOutput`] if `return_dict` is True, - otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - """ - - # 1. time - timesteps = timestep - if not paddle.is_tensor(timesteps): - timesteps = paddle.to_tensor([timesteps], dtype="int64") - elif paddle.is_tensor(timesteps) and len(timesteps.shape) == 0: - timesteps = timesteps[None] - - timestep_embed = self.time_proj(timesteps) - if self.config.use_timestep_embedding: - timestep_embed = self.time_mlp(timestep_embed) - else: - timestep_embed = timestep_embed[..., None] - timestep_embed = timestep_embed.tile([1, 1, sample.shape[2]]).cast(sample.dtype) - timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) - - # 2. down - down_block_res_samples = () - for downsample_block in self.down_blocks: - sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) - down_block_res_samples += res_samples - - # 3. mid - if self.mid_block: - sample = self.mid_block(sample, timestep_embed) - - # 4. up - for i, upsample_block in enumerate(self.up_blocks): - res_samples = down_block_res_samples[-1:] - down_block_res_samples = down_block_res_samples[:-1] - sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) - - # 5. post-process - if self.out_block: - sample = self.out_block(sample, timestep_embed) - - if not return_dict: - return (sample,) - - return UNet1DOutput(sample=sample) diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval_ijbc.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval_ijbc.py deleted file mode 100644 index 9c5a650d486d18eb02d6f60d448fc3b315261f5d..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval_ijbc.py +++ /dev/null @@ -1,483 +0,0 @@ -# coding: utf-8 - -import os -import pickle - -import matplotlib -import pandas as pd - -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import timeit -import sklearn -import argparse -import cv2 -import numpy as np -import torch -from skimage import transform as trans -from backbones import get_model -from sklearn.metrics import roc_curve, auc - -from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap -from prettytable import PrettyTable -from pathlib import Path - -import sys -import warnings - -sys.path.insert(0, "../") -warnings.filterwarnings("ignore") - -parser = argparse.ArgumentParser(description='do ijb test') -# general -parser.add_argument('--model-prefix', default='', help='path to load model.') -parser.add_argument('--image-path', default='', type=str, help='') -parser.add_argument('--result-dir', default='.', type=str, help='') -parser.add_argument('--batch-size', default=128, type=int, help='') -parser.add_argument('--network', default='iresnet50', type=str, help='') -parser.add_argument('--job', default='insightface', type=str, help='job name') -parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB') -args = parser.parse_args() - -target = args.target -model_path = args.model_prefix -image_path = args.image_path -result_dir = args.result_dir -gpu_id = None -use_norm_score = True # if Ture, TestMode(N1) -use_detector_score = True # if Ture, TestMode(D1) -use_flip_test = True # if Ture, TestMode(F1) -job = args.job -batch_size = args.batch_size - - -class Embedding(object): - def __init__(self, prefix, data_shape, batch_size=1): - image_size = (112, 112) - self.image_size = image_size - weight = torch.load(prefix) - resnet = get_model(args.network, dropout=0, fp16=False).cuda() - resnet.load_state_dict(weight) - model = torch.nn.DataParallel(resnet) - self.model = model - self.model.eval() - src = np.array([ - [30.2946, 51.6963], - [65.5318, 51.5014], - [48.0252, 71.7366], - [33.5493, 92.3655], - [62.7299, 92.2041]], dtype=np.float32) - src[:, 0] += 8.0 - self.src = src - self.batch_size = batch_size - self.data_shape = data_shape - - def get(self, rimg, landmark): - - assert landmark.shape[0] == 68 or landmark.shape[0] == 5 - assert landmark.shape[1] == 2 - if landmark.shape[0] == 68: - landmark5 = np.zeros((5, 2), dtype=np.float32) - landmark5[0] = (landmark[36] + landmark[39]) / 2 - landmark5[1] = (landmark[42] + landmark[45]) / 2 - landmark5[2] = landmark[30] - landmark5[3] = landmark[48] - landmark5[4] = landmark[54] - else: - landmark5 = landmark - tform = trans.SimilarityTransform() - tform.estimate(landmark5, self.src) - M = tform.params[0:2, :] - img = cv2.warpAffine(rimg, - M, (self.image_size[1], self.image_size[0]), - borderValue=0.0) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img_flip = np.fliplr(img) - img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB - img_flip = np.transpose(img_flip, (2, 0, 1)) - input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8) - input_blob[0] = img - input_blob[1] = img_flip - return input_blob - - @torch.no_grad() - def forward_db(self, batch_data): - imgs = torch.Tensor(batch_data).cuda() - imgs.div_(255).sub_(0.5).div_(0.5) - feat = self.model(imgs) - feat = feat.reshape([self.batch_size, 2 * feat.shape[1]]) - return feat.cpu().numpy() - - -# 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[] -def divideIntoNstrand(listTemp, n): - twoList = [[] for i in range(n)] - for i, e in enumerate(listTemp): - twoList[i % n].append(e) - return twoList - - -def read_template_media_list(path): - # ijb_meta = np.loadtxt(path, dtype=str) - ijb_meta = pd.read_csv(path, sep=' ', header=None).values - templates = ijb_meta[:, 1].astype(np.int) - medias = ijb_meta[:, 2].astype(np.int) - return templates, medias - - -# In[ ]: - - -def read_template_pair_list(path): - # pairs = np.loadtxt(path, dtype=str) - pairs = pd.read_csv(path, sep=' ', header=None).values - # print(pairs.shape) - # print(pairs[:, 0].astype(np.int)) - t1 = pairs[:, 0].astype(np.int) - t2 = pairs[:, 1].astype(np.int) - label = pairs[:, 2].astype(np.int) - return t1, t2, label - - -# In[ ]: - - -def read_image_feature(path): - with open(path, 'rb') as fid: - img_feats = pickle.load(fid) - return img_feats - - -# In[ ]: - - -def get_image_feature(img_path, files_list, model_path, epoch, gpu_id): - batch_size = args.batch_size - data_shape = (3, 112, 112) - - files = files_list - print('files:', len(files)) - rare_size = len(files) % batch_size - faceness_scores = [] - batch = 0 - img_feats = np.empty((len(files), 1024), dtype=np.float32) - - batch_data = np.empty((2 * batch_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, batch_size) - for img_index, each_line in enumerate(files[:len(files) - rare_size]): - name_lmk_score = each_line.strip().split(' ') - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], - dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - - batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0] - batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1] - if (img_index + 1) % batch_size == 0: - print('batch', batch) - img_feats[batch * batch_size:batch * batch_size + - batch_size][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - - batch_data = np.empty((2 * rare_size, 3, 112, 112)) - embedding = Embedding(model_path, data_shape, rare_size) - for img_index, each_line in enumerate(files[len(files) - rare_size:]): - name_lmk_score = each_line.strip().split(' ') - img_name = os.path.join(img_path, name_lmk_score[0]) - img = cv2.imread(img_name) - lmk = np.array([float(x) for x in name_lmk_score[1:-1]], - dtype=np.float32) - lmk = lmk.reshape((5, 2)) - input_blob = embedding.get(img, lmk) - batch_data[2 * img_index][:] = input_blob[0] - batch_data[2 * img_index + 1][:] = input_blob[1] - if (img_index + 1) % rare_size == 0: - print('batch', batch) - img_feats[len(files) - - rare_size:][:] = embedding.forward_db(batch_data) - batch += 1 - faceness_scores.append(name_lmk_score[-1]) - faceness_scores = np.array(faceness_scores).astype(np.float32) - # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01 - # faceness_scores = np.ones( (len(files), ), dtype=np.float32 ) - return img_feats, faceness_scores - - -# In[ ]: - - -def image2template_feature(img_feats=None, templates=None, medias=None): - # ========================================================== - # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim] - # 2. compute media feature. - # 3. compute template feature. - # ========================================================== - unique_templates = np.unique(templates) - template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) - - for count_template, uqt in enumerate(unique_templates): - - (ind_t,) = np.where(templates == uqt) - face_norm_feats = img_feats[ind_t] - face_medias = medias[ind_t] - unique_medias, unique_media_counts = np.unique(face_medias, - return_counts=True) - media_norm_feats = [] - for u, ct in zip(unique_medias, unique_media_counts): - (ind_m,) = np.where(face_medias == u) - if ct == 1: - media_norm_feats += [face_norm_feats[ind_m]] - else: # image features from the same video will be aggregated into one feature - media_norm_feats += [ - np.mean(face_norm_feats[ind_m], axis=0, keepdims=True) - ] - media_norm_feats = np.array(media_norm_feats) - # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True)) - template_feats[count_template] = np.sum(media_norm_feats, axis=0) - if count_template % 2000 == 0: - print('Finish Calculating {} template features.'.format( - count_template)) - # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True)) - template_norm_feats = sklearn.preprocessing.normalize(template_feats) - # print(template_norm_feats.shape) - return template_norm_feats, unique_templates - - -# In[ ]: - - -def verification(template_norm_feats=None, - unique_templates=None, - p1=None, - p2=None): - # ========================================================== - # Compute set-to-set Similarity Score. - # ========================================================== - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - - score = np.zeros((len(p1),)) # save cosine distance between pairs - - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [ - total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) - ] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print('Finish {}/{} pairs.'.format(c, total_sublists)) - return score - - -# In[ ]: -def verification2(template_norm_feats=None, - unique_templates=None, - p1=None, - p2=None): - template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) - for count_template, uqt in enumerate(unique_templates): - template2id[uqt] = count_template - score = np.zeros((len(p1),)) # save cosine distance between pairs - total_pairs = np.array(range(len(p1))) - batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation - sublists = [ - total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) - ] - total_sublists = len(sublists) - for c, s in enumerate(sublists): - feat1 = template_norm_feats[template2id[p1[s]]] - feat2 = template_norm_feats[template2id[p2[s]]] - similarity_score = np.sum(feat1 * feat2, -1) - score[s] = similarity_score.flatten() - if c % 10 == 0: - print('Finish {}/{} pairs.'.format(c, total_sublists)) - return score - - -def read_score(path): - with open(path, 'rb') as fid: - img_feats = pickle.load(fid) - return img_feats - - -# # Step1: Load Meta Data - -# In[ ]: - -assert target == 'IJBC' or target == 'IJBB' - -# ============================================================= -# load image and template relationships for template feature embedding -# tid --> template id, mid --> media id -# format: -# image_name tid mid -# ============================================================= -start = timeit.default_timer() -templates, medias = read_template_media_list( - os.path.join('%s/meta' % image_path, - '%s_face_tid_mid.txt' % target.lower())) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# In[ ]: - -# ============================================================= -# load template pairs for template-to-template verification -# tid : template id, label : 1/0 -# format: -# tid_1 tid_2 label -# ============================================================= -start = timeit.default_timer() -p1, p2, label = read_template_pair_list( - os.path.join('%s/meta' % image_path, - '%s_template_pair_label.txt' % target.lower())) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# # Step 2: Get Image Features - -# In[ ]: - -# ============================================================= -# load image features -# format: -# img_feats: [image_num x feats_dim] (227630, 512) -# ============================================================= -start = timeit.default_timer() -img_path = '%s/loose_crop' % image_path -img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower()) -img_list = open(img_list_path) -files = img_list.readlines() -# files_list = divideIntoNstrand(files, rank_size) -files_list = files - -# img_feats -# for i in range(rank_size): -img_feats, faceness_scores = get_image_feature(img_path, files_list, - model_path, 0, gpu_id) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) -print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], - img_feats.shape[1])) - -# # Step3: Get Template Features - -# In[ ]: - -# ============================================================= -# compute template features from image features. -# ============================================================= -start = timeit.default_timer() -# ========================================================== -# Norm feature before aggregation into template feature? -# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face). -# ========================================================== -# 1. FaceScore (Feature Norm) -# 2. FaceScore (Detector) - -if use_flip_test: - # concat --- F1 - # img_input_feats = img_feats - # add --- F2 - img_input_feats = img_feats[:, 0:img_feats.shape[1] // - 2] + img_feats[:, img_feats.shape[1] // 2:] -else: - img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] - -if use_norm_score: - img_input_feats = img_input_feats -else: - # normalise features to remove norm information - img_input_feats = img_input_feats / np.sqrt( - np.sum(img_input_feats ** 2, -1, keepdims=True)) - -if use_detector_score: - print(img_input_feats.shape, faceness_scores.shape) - img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] -else: - img_input_feats = img_input_feats - -template_norm_feats, unique_templates = image2template_feature( - img_input_feats, templates, medias) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# # Step 4: Get Template Similarity Scores - -# In[ ]: - -# ============================================================= -# compute verification scores between template pairs. -# ============================================================= -start = timeit.default_timer() -score = verification(template_norm_feats, unique_templates, p1, p2) -stop = timeit.default_timer() -print('Time: %.2f s. ' % (stop - start)) - -# In[ ]: -save_path = os.path.join(result_dir, args.job) -# save_path = result_dir + '/%s_result' % target - -if not os.path.exists(save_path): - os.makedirs(save_path) - -score_save_file = os.path.join(save_path, "%s.npy" % target.lower()) -np.save(score_save_file, score) - -# # Step 5: Get ROC Curves and TPR@FPR Table - -# In[ ]: - -files = [score_save_file] -methods = [] -scores = [] -for file in files: - methods.append(Path(file).stem) - scores.append(np.load(file)) - -methods = np.array(methods) -scores = dict(zip(methods, scores)) -colours = dict( - zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) -x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] -tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) -fig = plt.figure() -for method in methods: - fpr, tpr, _ = roc_curve(label, scores[method]) - roc_auc = auc(fpr, tpr) - fpr = np.flipud(fpr) - tpr = np.flipud(tpr) # select largest tpr at same fpr - plt.plot(fpr, - tpr, - color=colours[method], - lw=1, - label=('[%s (AUC = %0.4f %%)]' % - (method.split('-')[-1], roc_auc * 100))) - tpr_fpr_row = [] - tpr_fpr_row.append("%s-%s" % (method, target)) - for fpr_iter in np.arange(len(x_labels)): - _, min_index = min( - list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) - tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) - tpr_fpr_table.add_row(tpr_fpr_row) -plt.xlim([10 ** -6, 0.1]) -plt.ylim([0.3, 1.0]) -plt.grid(linestyle='--', linewidth=1) -plt.xticks(x_labels) -plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) -plt.xscale('log') -plt.xlabel('False Positive Rate') -plt.ylabel('True Positive Rate') -plt.title('ROC on IJB') -plt.legend(loc="lower right") -fig.savefig(os.path.join(save_path, '%s.pdf' % target.lower())) -print(tpr_fpr_table) diff --git a/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/qformer_quantizer.py b/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/qformer_quantizer.py deleted file mode 100644 index 93ebb0082dec2a1e23ca559b439905b03461dc59..0000000000000000000000000000000000000000 --- a/spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/qformer_quantizer.py +++ /dev/null @@ -1,375 +0,0 @@ -""" - Copyright (c) 2023, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" -import logging - -import torch -import torch.distributed as dist -import torch.nn as nn -from torch.cuda.amp import autocast as autocast -from torch.nn import functional as F -import numpy as np -from functools import partial -from einops import rearrange - -from .blip2 import Blip2Base, disabled_train -from .vit import Block -from .utils import download_cached_file, is_url - -class VectorQuantizer2(nn.Module): - """ - Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly - avoids costly matrix multiplications and allows for post-hoc remapping of indices. - """ - - # NOTE: due to a bug the beta term was applied to the wrong term. for - # backwards compatibility we use the buggy version by default, but you can - # specify legacy=False to fix it. - def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True): - super().__init__() - self.n_e = n_e - self.e_dim = e_dim - self.beta = beta - self.legacy = legacy - - self.embedding = nn.Embedding(self.n_e, self.e_dim) - self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) - - self.remap = remap - if self.remap is not None: - self.register_buffer("used", torch.tensor(np.load(self.remap))) - self.re_embed = self.used.shape[0] - self.unknown_index = unknown_index # "random" or "extra" or integer - if self.unknown_index == "extra": - self.unknown_index = self.re_embed - self.re_embed = self.re_embed + 1 - print(f"Remapping {self.n_e} indices to {self.re_embed} indices. " - f"Using {self.unknown_index} for unknown indices.") - else: - self.re_embed = n_e - - self.sane_index_shape = sane_index_shape - - def remap_to_used(self, inds): - ishape = inds.shape - assert len(ishape) > 1 - inds = inds.reshape(ishape[0], -1) - used = self.used.to(inds) - match = (inds[:, :, None] == used[None, None, ...]).long() - new = match.argmax(-1) - unknown = match.sum(2) < 1 - if self.unknown_index == "random": - new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) - else: - new[unknown] = self.unknown_index - return new.reshape(ishape) - - def unmap_to_all(self, inds): - ishape = inds.shape - assert len(ishape) > 1 - inds = inds.reshape(ishape[0], -1) - used = self.used.to(inds) - if self.re_embed > self.used.shape[0]: # extra token - inds[inds >= self.used.shape[0]] = 0 # simply set to zero - back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) - return back.reshape(ishape) - - # def l2norm(self, t): - # return F.normalize(t, p = 2, dim = -1) - - def forward(self, z, temp=None, rescale_logits=False, return_logits=False): - assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel" - assert rescale_logits is False, "Only for interface compatible with Gumbel" - assert return_logits is False, "Only for interface compatible with Gumbel" - # reshape z -> (batch, height, width, channel) and flatten - #z = rearrange(z, 'b c h w -> b h w c').contiguous() - bz = z.shape[0] - z_flattened = z.view(-1, self.e_dim) - #print('z_flattened', z_flattened.shape) - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - - d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ - torch.sum(self.embedding.weight**2, dim=1) - 2 * \ - torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n')) - - min_encoding_indices = torch.argmin(d, dim=1) - z_q = self.embedding(min_encoding_indices).view(z.shape) - perplexity = None - min_encodings = None - - # compute loss for embedding - if not self.legacy: - loss = self.beta * torch.mean((z_q.detach() - z)**2) + torch.mean((z_q - z.detach())**2) - else: - loss = torch.mean((z_q.detach() - z)**2) + self.beta * torch.mean((z_q - z.detach())**2) - - # preserve gradients - z_q = z + (z_q - z).detach() - - # reshape back to match original input shape - #z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous() - z_q = z_q.reshape(bz, -1, z_q.shape[-1]) - if self.remap is not None: - min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis - min_encoding_indices = self.remap_to_used(min_encoding_indices) - min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten - - if self.sane_index_shape: - min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) - - return z_q, loss, min_encoding_indices - - def get_codebook_entry(self, indices, shape=None): - # shape specifying (batch, height, width, channel) - if self.remap is not None: - indices = indices.reshape(shape[0], -1) # add batch axis - indices = self.unmap_to_all(indices) - indices = indices.reshape(-1) # flatten again - - # get quantized latent vectors - z_q = self.embedding(indices) - - if shape is not None: - z_q = z_q.view(shape) - # reshape back to match original input shape - z_q = z_q.permute(0, 3, 1, 2).contiguous() - - return z_q - - -class Blip2QformerQuantizer(Blip2Base): - """ - BLIP2 first-stage model with Q-former and ViT. - Supported model types: - - pretrained: pretrained model with vit-g - - pretrain_vitL: pretrained model with vit-large - - coco: fintuned model on coco - Usage: - >>> from lavis.models import load_model - >>> model = load_model("blip2", "pretrain") - """ - - PRETRAINED_MODEL_CONFIG_DICT = { - "pretrain": "configs/models/blip2/blip2_pretrain.yaml", - "pretrain_vitL": "configs/models/blip2/blip2_pretrain_vitL.yaml", - "coco": "configs/models/blip2/blip2_coco.yaml", - } - - def __init__(self, - vit_model="eva_clip_g", - img_size=224, - drop_path_rate=0, - use_grad_checkpoint=False, - vit_precision="fp16", - freeze_vit=True, - num_query_token=32, - cross_attention_freq=2, - embed_dim=256, - max_txt_len=32, - codebook_embed_dim=32, - n_embed=8192, - recon_s=True, - blocks_for_image=True, - decode_depth=4, - use_recon_s_for_image=False, - use_qformer_image=False, - image_features_dim=1024): - super().__init__() - - self.tokenizer = self.init_tokenizer() - - self.visual_encoder, self.ln_vision = self.init_vision_encoder(vit_model, img_size, drop_path_rate, use_grad_checkpoint, - vit_precision) - if freeze_vit: - for name, param in self.visual_encoder.named_parameters(): - param.requires_grad = False - self.visual_encoder = self.visual_encoder.eval() - self.visual_encoder.train = disabled_train - logging.info("freeze vision encoder") - self.ln_vision.weight.requires_grad = False - self.ln_vision.bias.requires_grad = False - - self.codebook_embed_dim = codebook_embed_dim - self.n_embed = n_embed - self.recon_s = recon_s - self.blocks_for_image = blocks_for_image - self.use_recon_s_for_image = use_recon_s_for_image - self.depth = decode_depth - self.image_features_dim = image_features_dim - self.use_qformer_image = use_qformer_image - - self.Qformer, self.query_tokens = self.init_Qformer(num_query_token, self.visual_encoder.num_features) - - self.Qformer.cls = None - self.Qformer.bert.embeddings.word_embeddings = None - self.Qformer.bert.embeddings.position_embeddings = None - for layer in self.Qformer.bert.encoder.layer: - layer.output = None - layer.intermediate = None - - for name, param in self.Qformer.named_parameters(): - param.requires_grad = False - self.query_tokens.requires_grad = False - - self.quantize = VectorQuantizer2(n_embed, codebook_embed_dim, beta=0.25, remap=None, sane_index_shape=False) - - self.encode_task_layer = nn.Sequential( - nn.Linear(self.Qformer.config.hidden_size, self.Qformer.config.hidden_size), - nn.Tanh(), - nn.Linear(self.Qformer.config.hidden_size, codebook_embed_dim) # for quantize - ) - - self.decode_task_layer = nn.Sequential( - nn.Linear(codebook_embed_dim, codebook_embed_dim), - nn.Tanh(), - nn.Linear(codebook_embed_dim, self.Qformer.config.hidden_size) # for quantize - ) - - self.quantize = self.quantize.eval() - self.quantize.training = False - for name, param in self.named_parameters(): - if 'quantize' in name or 'encode_task_layer' in name or 'decode_task_layer' in name: - #print('freeze params', name) - param.requires_grad = False - - if self.recon_s: - self.pos_embed = nn.Parameter(torch.zeros(1, num_query_token, self.Qformer.config.hidden_size)) - self.blocks = nn.ModuleList([ - Block(dim=self.Qformer.config.hidden_size, - num_heads=12, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=partial(nn.LayerNorm, eps=1e-6)) for i in range(self.depth) - ]) - - if self.blocks_for_image: - self.pos_embed_image = nn.Parameter(torch.zeros(1, num_query_token, self.Qformer.config.hidden_size)) - self.blocks_image = nn.ModuleList([ - Block(dim=self.Qformer.config.hidden_size, - num_heads=12, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=partial(nn.LayerNorm, eps=1e-6)) for i in range(self.depth) - ]) - - if self.use_qformer_image: - num_reverse_token = 1 - self.Reverse_Qformer, self.reverse_tokens = self.init_Qformer(num_reverse_token, self.Qformer.config.hidden_size) - - self.Reverse_Qformer.cls = None - self.Reverse_Qformer.bert.embeddings.word_embeddings = None - self.Reverse_Qformer.bert.embeddings.position_embeddings = None - for layer in self.Reverse_Qformer.bert.encoder.layer: - layer.output = None - layer.intermediate = None - self.distill_image_proj = nn.Linear(self.Qformer.config.hidden_size, image_features_dim) - - else: - self.image_down = nn.Sequential( - nn.Linear(self.Qformer.config.hidden_size, 256, bias=False), - nn.ReLU(), - nn.Linear(256, 128, bias=False), - nn.ReLU(), - nn.Linear(128, 32, bias=False), - ) - self.distill_image_proj = nn.Linear(num_query_token * 32, image_features_dim) - - def get_codebook_indices(self, image): - with torch.no_grad(): - with self.maybe_autocast(): - image_embeds = self.ln_vision(self.visual_encoder(image)) - image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device) - query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) - query_output = self.Qformer.bert( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_atts, - return_dict=True, - ) - - query_output_down = self.encode_task_layer(query_output.last_hidden_state) - quant, loss_embed, embed_ind = self.quantize(query_output_down) - embed_ind = embed_ind.reshape(quant.shape[0], -1) - - query_output_up = self.decode_task_layer(quant) - - return embed_ind, query_output_up - - def get_codebook_entry(self, indices): - quant_embedding = self.quantize.get_codebook_entry(indices) - # print('quant_embedding_shape: ', quant_embedding.shape) - # print(self.decode_task_layer) - # exit() - query_output_up = self.decode_task_layer(quant_embedding) - - pos_embed_image = self.pos_embed_image.repeat(query_output_up.shape[0], 1, 1) - query_output_up_pos_image = query_output_up + pos_embed_image - for blk in self.blocks_image: - query_output_up_pos_image = blk(query_output_up_pos_image) - query_output_up = query_output_up_pos_image - - if self.use_qformer_image: - query_atts = torch.ones(query_output_up.size()[:-1], dtype=torch.long).to(query_output_up.device) - reverse_tokens = self.reverse_tokens.expand(query_output_up.shape[0], -1, -1) - reverse_output = self.Reverse_Qformer.bert( - query_embeds=reverse_tokens, - encoder_hidden_states=query_output_up, - encoder_attention_mask=query_atts, - return_dict=True, - ) - reverse_output = reverse_output.last_hidden_state - reverse_output_proj = self.distill_image_proj(reverse_output).squeeze(1) - else: - reverse_output = self.image_down(query_output_up) - reverse_output = reverse_output.reshape(reverse_output.shape[0], -1) - reverse_output_proj = self.distill_image_proj(reverse_output) - - return reverse_output_proj - - @classmethod - def from_pretrained(cls, pretrained_model_path, **kwargs): - vit_model = kwargs.get("vit_model", "eva_clip_g") - img_size = kwargs.get("image_size", 224) - num_query_token = kwargs.get("num_query_token", 32) - cross_attention_freq = kwargs.get("cross_attention_freq", 2) - - drop_path_rate = kwargs.get("drop_path_rate", 0) - use_grad_checkpoint = kwargs.get("use_grad_checkpoint", False) - vit_precision = kwargs.get("vit_precision", "fp16") - freeze_vit = kwargs.get("freeze_vit", True) - - max_txt_len = kwargs.get("max_txt_len", 32) - - model = cls( - vit_model=vit_model, - img_size=img_size, - drop_path_rate=drop_path_rate, - use_grad_checkpoint=use_grad_checkpoint, - vit_precision=vit_precision, - freeze_vit=freeze_vit, - num_query_token=num_query_token, - cross_attention_freq=cross_attention_freq, - max_txt_len=max_txt_len, - ) - - if pretrained_model_path.startswith('http'): - print('start download seed model...') - cached_file = download_cached_file(pretrained_model_path, check_hash=False, progress=True) - print(cached_file) - ckpt = torch.load(cached_file, map_location="cpu") - else: - ckpt = torch.load(pretrained_model_path, map_location="cpu") - missing, unexcepted = model.load_state_dict(ckpt, strict=False) - print('missing keys: ', len(missing), 'unexpected keys:', len(unexcepted)) - return model \ No newline at end of file diff --git a/spaces/AIZ2H/05-SOTA-Question-Answer-From-TextFileContext/README.md b/spaces/AIZ2H/05-SOTA-Question-Answer-From-TextFileContext/README.md deleted file mode 100644 index 26bfda125130862556841a59cfe5955958ca5e77..0000000000000000000000000000000000000000 --- a/spaces/AIZ2H/05-SOTA-Question-Answer-From-TextFileContext/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 05 SOTA Question Answer From TextFileContext -emoji: ❔📰 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb8_cub.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb8_cub.py deleted file mode 100644 index 17054ef536930d74136897f8f25637321a364ce7..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb8_cub.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = [ - '../_base_/models/resnet50.py', - '../_base_/datasets/cub_bs8_448.py', - '../_base_/schedules/cub_bs64.py', - '../_base_/default_runtime.py', -] - -# model settings -# use pre-train weight converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa -pretrained = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa - -model = dict( - type='ImageClassifier', - backbone=dict( - init_cfg=dict( - type='Pretrained', checkpoint=pretrained, prefix='backbone')), - head=dict(num_classes=200, )) - -# runtime settings -default_hooks = dict(logger=dict(type='LoggerHook', interval=20)) diff --git a/spaces/AbelKidane/headdetector/prediction.py b/spaces/AbelKidane/headdetector/prediction.py deleted file mode 100644 index 190409947476f1dfc98af77f0cb43906df6589b1..0000000000000000000000000000000000000000 --- a/spaces/AbelKidane/headdetector/prediction.py +++ /dev/null @@ -1,185 +0,0 @@ -#Import Packages -import onnxruntime -import cv2 -import numpy as np -from PIL import Image -import matplotlib.pyplot as plt -import fire -import streamlit as st -import cvzone - -# Global Variables -confidence = 80 -conf_thresold = 0.8 -iou_thresold = 0.3 -Display_Confidence = True -Display_Class = True - -# load image -def load_image(image_path, input_shape): - image = cv2.imread(image_path) - # Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - input_height, input_width = input_shape[2:] - image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - resized = cv2.resize(image_rgb, (input_width, input_height)) - # Scale input pixel value to 0 to 1 - input_image = resized / 255.0 - input_image = input_image.transpose(2,0,1) - input_tensor = input_image[np.newaxis, :, :, :].astype(np.float32) - input_tensor.shape - - return [image, input_tensor, rgb_image] - -# load model -def load_model(model_path): - opt_session = onnxruntime.SessionOptions() - opt_session.enable_mem_pattern = False - opt_session.enable_cpu_mem_arena = False - opt_session.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL - model_path = model_path - EP_list = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - ort_session = onnxruntime.InferenceSession(model_path, providers=EP_list) - model_inputs = ort_session.get_inputs() - input_names = [model_inputs[i].name for i in range(len(model_inputs))] - input_shape = model_inputs[0].shape - - return [ort_session, input_shape] - -# run inference using the onnx model -def predict(image, ort_session, input_tensor): - - global conf_thresold - - model_inputs = ort_session.get_inputs() - input_names = [model_inputs[i].name for i in range(len(model_inputs))] - input_shape = model_inputs[0].shape - input_height, input_width = input_shape[2:] - image_height, image_width = image.shape[:2] - model_output = ort_session.get_outputs() - output_names = [model_output[i].name for i in range(len(model_output))] - outputs = ort_session.run(output_names, {input_names[0]: input_tensor})[0] - predictions = np.squeeze(outputs).T - # conf_thresold = 0.8 - # conf_thresold = confidence/100 - # Filter out object confidence scores below threshold - scores = np.max(predictions[:, 4:], axis=1) - predictions = predictions[scores > conf_thresold, :] - scores = scores[scores > conf_thresold] - # Get the class with the highest confidence - class_ids = np.argmax(predictions[:, 4:], axis=1) - # Get bounding boxes for each object - boxes = predictions[:, :4] - #rescale box - input_shape = np.array([input_width, input_height, input_width, input_height]) - boxes = np.divide(boxes, input_shape, dtype=np.float32) - boxes *= np.array([image_width, image_height, image_width, image_height]) - boxes = boxes.astype(np.int32) - - return [boxes, scores, class_ids] - -# annotate the image by drawing the bounding boxes -def annotate(image, boxes, scores, class_ids): - # Apply non-maxima suppression to suppress weak, overlapping bounding boxes - global iou_thresold - global Display_Confidence - global Display_Class - iou_thresold = iou_thresold/100 - indices = nms(boxes, scores, iou_thresold) - # Define classes - CLASSES = ['head'] - image_draw = image.copy() - for (bbox, score, label) in zip(xywh2xyxy(boxes[indices]), scores[indices], class_ids[indices]): - bbox = bbox.round().astype(np.int32).tolist() - cls_id = int(label) - cls = CLASSES[cls_id] - # color = (0,255,0) - - x1,y1,w,h = bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1] - display_message = "" - if (Display_Class): - display_message = display_message + cls - if(Display_Confidence): - display_message = f"{display_message} {score:.2f}" - # cvzone.cornerRect(image_draw, (x1,y1,w,h), colorR=(0, 255, 0),t=1) - cv2.rectangle(image_draw, (x1,y1,w,h), (0, 255, 0), 1) - if (Display_Confidence or Display_Class): - cvzone.putTextRect(image_draw, - display_message, (max(0,x1), max(35,y1)), - thickness=1,scale=0.4, font=cv2.FONT_HERSHEY_DUPLEX , - offset = 5,colorR=(0, 0, 0)) - - # Image.fromarray(cv2.cvtColor(image_draw, cv2.COLOR_BGR2RGB)) - rgb_image_draw = cv2.cvtColor(image_draw, cv2.COLOR_BGR2RGB) - return rgb_image_draw - -def nms(boxes, scores, iou_threshold): - # Sort by score - sorted_indices = np.argsort(scores)[::-1] - keep_boxes = [] - while sorted_indices.size > 0: - # Pick the last box - box_id = sorted_indices[0] - keep_boxes.append(box_id) - # Compute IoU of the picked box with the rest - ious = compute_iou(boxes[box_id, :], boxes[sorted_indices[1:], :]) - # Remove boxes with IoU over the threshold - keep_indices = np.where(ious < iou_threshold)[0] - sorted_indices = sorted_indices[keep_indices + 1] - - return keep_boxes - -def compute_iou(box, boxes): - # Compute xmin, ymin, xmax, ymax for both boxes - xmin = np.maximum(box[0], boxes[:, 0]) - ymin = np.maximum(box[1], boxes[:, 1]) - xmax = np.minimum(box[2], boxes[:, 2]) - ymax = np.minimum(box[3], boxes[:, 3]) - - # Compute intersection area - intersection_area = np.maximum(0, xmax - xmin) * np.maximum(0, ymax - ymin) - - # Compute union area - box_area = (box[2] - box[0]) * (box[3] - box[1]) - boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) - union_area = box_area + boxes_area - intersection_area - - # Compute IoU - iou = intersection_area / union_area - - return iou - -def xywh2xyxy(x): - # Convert bounding box (x, y, w, h) to bounding box (x1, y1, x2, y2) - y = np.copy(x) - y[..., 0] = x[..., 0] - x[..., 2] / 2 - y[..., 1] = x[..., 1] - x[..., 3] / 2 - y[..., 2] = x[..., 0] + x[..., 2] / 2 - y[..., 3] = x[..., 1] + x[..., 3] / 2 - return y - -def prediction(image_path, conf=80, disp_Class=True, disp_Confidence=True, - iou_thresh_ = 30, model_path="models/best_re_final.onnx"): - global confidence - global conf_thresold - global iou_thresold - global Display_Confidence - global Display_Class - - Display_Confidence = disp_Confidence - Display_Class = disp_Class - confidence = conf - conf_thresold = confidence/100 - iou_thresold = iou_thresh_ - # *Calling Functions* - model = load_model(model_path) - input_I = load_image(image_path, model[1]) #path and input shape is passed - predictions = predict(input_I[0], model[0], input_I[1]) #image, ort_session, and input tensor is passed - annotated_image = annotate(input_I [0], predictions[0], predictions[1], predictions[2]) #boxes, and scores are passed - - return annotated_image - - - -if __name__=='__main__': - fire.Fire(prediction) \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Cromicle.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Cromicle.py deleted file mode 100644 index 5f521b3e2a3d32e730a11a5115fd0a3acbf35adc..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Cromicle.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from hashlib import sha256 -from typing import AsyncGenerator, Dict, List - -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt - - -class Cromicle(AsyncGeneratorProvider): - url: str = 'https://cromicle.top' - working: bool = True - supports_gpt_35_turbo: bool = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: List[Dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator[str, None]: - async with ClientSession( - headers=_create_header() - ) as session: - async with session.post( - f'{cls.url}/chat', - proxy=proxy, - json=_create_payload(format_prompt(messages)) - ) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() - - -def _create_header() -> Dict[str, str]: - return { - 'accept': '*/*', - 'content-type': 'application/json', - } - - -def _create_payload(message: str) -> Dict[str, str]: - return { - 'message': message, - 'token': 'abc', - 'hash': sha256('abc'.encode() + message.encode()).hexdigest() - } \ No newline at end of file diff --git a/spaces/Adapter/CoAdapter/ldm/lr_scheduler.py b/spaces/Adapter/CoAdapter/ldm/lr_scheduler.py deleted file mode 100644 index be39da9ca6dacc22bf3df9c7389bbb403a4a3ade..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/lr_scheduler.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np - - -class LambdaWarmUpCosineScheduler: - """ - note: use with a base_lr of 1.0 - """ - def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): - self.lr_warm_up_steps = warm_up_steps - self.lr_start = lr_start - self.lr_min = lr_min - self.lr_max = lr_max - self.lr_max_decay_steps = max_decay_steps - self.last_lr = 0. - self.verbosity_interval = verbosity_interval - - def schedule(self, n, **kwargs): - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") - if n < self.lr_warm_up_steps: - lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start - self.last_lr = lr - return lr - else: - t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) - t = min(t, 1.0) - lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( - 1 + np.cos(t * np.pi)) - self.last_lr = lr - return lr - - def __call__(self, n, **kwargs): - return self.schedule(n,**kwargs) - - -class LambdaWarmUpCosineScheduler2: - """ - supports repeated iterations, configurable via lists - note: use with a base_lr of 1.0. - """ - def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): - assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) - self.lr_warm_up_steps = warm_up_steps - self.f_start = f_start - self.f_min = f_min - self.f_max = f_max - self.cycle_lengths = cycle_lengths - self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) - self.last_f = 0. - self.verbosity_interval = verbosity_interval - - def find_in_interval(self, n): - interval = 0 - for cl in self.cum_cycles[1:]: - if n <= cl: - return interval - interval += 1 - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) - t = min(t, 1.0) - f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( - 1 + np.cos(t * np.pi)) - self.last_f = f - return f - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) - self.last_f = f - return f - diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/base.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/base.py deleted file mode 100644 index 83b7b9763c20ce1ae7fc69084389b26e0e4d9744..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/base.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, List - -from pydantic import BaseModel - -from . import describer_registry as DescriberRegistry -from abc import abstractmethod - -if TYPE_CHECKING: - from agentverse.environments import BaseEnvironment - - -class BaseDescriber(BaseModel): - @abstractmethod - def get_env_description( - self, environment: BaseEnvironment, *args, **kwargs - ) -> List[str]: - """Return the environment description for each agent""" - pass - - def reset(self) -> None: - pass diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/numberbar/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/numberbar/Factory.js deleted file mode 100644 index f1afce4c18961fd2a6107c73dfd28a510dca95bc..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/numberbar/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import NumberBar from './NumberBar.js'; -import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('numberBar', function (config) { - var gameObject = new NumberBar(this.scene, config); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.UI.NumberBar', NumberBar); - -export default NumberBar; \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/configs/detectors/detectors_htc_r50_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/detectors/detectors_htc_r50_1x_coco.py deleted file mode 100644 index 0d2fc4f77fcca715c1dfb613306d214b636aa0c0..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/detectors/detectors_htc_r50_1x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 50883ffeb16369ea6210f2ece8fc2d7e084b0134..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 31e5943216f19a87a2f1e6f666efead573f72626..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/AnimaLab/bias-test-gpt-pairs/mgr_biases.py b/spaces/AnimaLab/bias-test-gpt-pairs/mgr_biases.py deleted file mode 100644 index ce3a27095606ec5f224e1a955a5de5b8d1cc6316..0000000000000000000000000000000000000000 --- a/spaces/AnimaLab/bias-test-gpt-pairs/mgr_biases.py +++ /dev/null @@ -1,557 +0,0 @@ -import gradio as gr -import os -import json -import datetime -import re -import pandas as pd -import numpy as np -import glob -import huggingface_hub -print("hfh", huggingface_hub.__version__) -from huggingface_hub import hf_hub_download, upload_file, delete_file, snapshot_download, list_repo_files, dataset_info - -DATASET_REPO_ID = "AnimaLab/bias-test-gpt-biases" -DATASET_REPO_URL = f"https://huggingface.co/{DATASET_REPO_ID}" -HF_DATA_DIRNAME = "." - -# directories for saving bias specifications -PREDEFINED_BIASES_DIR = "predefinded_biases" -CUSTOM_BIASES_DIR = "custom_biases" -# directory for saving generated sentences -GEN_SENTENCE_DIR = "gen_sentences" -# TEMPORARY LOCAL DIRECTORY FOR DATA -LOCAL_DATA_DIRNAME = "data" - -# DATASET ACCESS KEYS -ds_write_token = os.environ.get("DS_WRITE_TOKEN") -HF_TOKEN = os.environ.get("HF_TOKEN") - -####################### -## PREDEFINED BIASES ## -####################### -bias2tag = { "Flowers/Insects <> Pleasant/Unpleasant": "flowers_insects__pleasant_unpleasant", - "Instruments/Weapons <> Pleasant/Unpleasant": "instruments_weapons__pleasant_unpleasant", - "Male/Female <> Math/Art": "male_female__math_arts", - "Male/Female <> Science/Art": "male_female__science_arts", - "Eur.-American/Afr.-American <> Pleasant/Unpleasant #1": "eur_am_names_afr_am_names__pleasant_unpleasant_1", - "Eur.-American/Afr.-American <> Pleasant/Unpleasant #2": "eur_am_names_afr_am_names__pleasant_unpleasant_2", - "Eur.-American/Afr.-American <> Pleasant/Unpleasant #3": "eur_am_names_afr_am_names__pleasant_unpleasant_3", - "Male/Female <> Career/Family": "male_female__career_family", - "Mental/Physical Disease <> Temporary/Permanent": "mental_physial_disease__temporary_permanent", - "Young/Old Name <> Pleasant/Unpleasant": "young_old__pleasant_unpleasant", - "Male/Female <> Professions": "male_female__profession", - "African-Female/European-Male <> Intersectional": "african_female_european_male__intersectional", - "African-Female/European-Male <> Emergent": "african_female_european_male__emergent_intersectional", - "Mexican-Female/European-Male <> Intersectional": "mexican_female_european_male__intersectional", - "Mexican-Female/European-Male <> Emergent": "mexican_female_european_male__emergent_intersectional", - "Male/Female <> Care/Expertise": "male_female__care_expertise", - "Infant/Adult <> Vaccination": "infant_adult__vaccination", - "Hispanic/Caucasian <> Treatment-Adherence": "hisp_eur__treatment_adhere", - "Afr.-American/Eur.American <> Risky-Health-Behaviors": "afr_am_eur_am__risky_health" - } - -CUSTOM_BIAS_SPECS = { "male_female__care_expertise": - { - "name": "male_female__care_expertise", - "source": "ChatGPT", - "url": "", - "social_groups": { - "Female_nurses": ["female", "woman", "sister", "she", "her", "hers", "daughter"], - "Male_doctors": ["male", "man", "brother", "he", "him", "his", "son"] - }, - "attributes": { - "Caregiving": ["caregiving", "empathy", "support", "compassion", "nurturing", "emotional", "bedside manner", "patient care"], - "Decision_making": ["decision-making", "authority", "medical knowledge", "diagnosis", "treatment planning", "expertise", "leadership", "clinical skills"] - }, - "templates": ["[T] is [A]"] - }, - "infant_adult__vaccination": - { - "name": "infant_adult__vaccination", - "source": "casigliani2022vaccine", - "url": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9289735/", - "social_groups": { - "Infant_terms": ["baby", "child", "infant", "newborn", "neonate"], - "Adult_terms": ["adult", "grown-up", "man", "woman", "person"] - }, - "attributes": { - "Postpone": ["postpone vaccination", "defer vaccination", "delay vaccination", "slowed down vaccination", "avoid vaccination", "delay immunizing", "postpone immunization"], - "Ensure": ["vaccinate", "ensure vaccination", "give vaccines", "secure vaccination", "perform vaccination", "immunize", "immunization"] - }, - "templates": ["[T] should [A]"] - }, - "hisp_eur__treatment_adhere": - { - "name": "hisp_eur__treatment_adhere", - "source": "Mayo’07", - "url": "https://www.researchgate.net/publication/233514379_Attitudes_and_Perceptions_of_Hispanic_Patients_and_Health_Care_Providers_in_the_Treatment_of_Hispanic_Patients_A_Review_of_the_Literature", - "social_groups": { - "Hispnic_terms": ["Hispanic", "Latino", "Latinx", "Hispanic patient"], - "Eurpean_terms": ["White", "European American", "Caucasian", "White patient"] - }, - "attributes": { - "Non_Adherence": ["avoid medication", "avoid treatment", "skip prescription", "skip treatment"], - "Adherence": ["adhere medication", "follow treatment", "remember prescriptions", "commit to treatment"] - }, - "templates": ["[T] tend to [A]", "[T] are likely to [A]"] - }, - "afr_am_eur_am__risky_health": - { - "name": "afr_am_eur_am__risky_health", - "source": "van Ryn'00", - "url": "https://pubmed.ncbi.nlm.nih.gov/10695979/", - "social_groups": { - "AAM_terms": ["Black", "African American", "Black patient", "Black person"], - "EAM_terms": ["White", "European American", "White patient", "White person"] - }, - "attributes": { - "Risky_health_behavior": ["alcohol", "drugs", "alcoholism", "overdose", "smoking", "cigarette"], - "Positive_health_behavior": ["healthy diet", "eating healthy", "proper sleep", "avoiding smoking", "avoiding drinking alcohol", "proper hydration"] - }, - "templates": ["[T] likes [A]", "[T] like [A]"] - } -} - -################# -## BIAS SAVING ## -################# -def save_bias(filename: str, dir:str, bias_json: dict): - DATA_FILENAME = f"{filename}" - DATA_FILE = os.path.join(HF_DATA_DIRNAME, dir, DATA_FILENAME) - - # timestamp bias - date_time = datetime.datetime.now() - bias_json['created'] = date_time.strftime("%d/%m/%Y %H:%M:%S") - - print(f"Trying to save to: {DATA_FILE}") - - with open(DATA_FILENAME, 'w') as outfile: - json.dump(bias_json, outfile) - - commit_url = upload_file( - path_or_fileobj=DATA_FILENAME, - path_in_repo=DATA_FILE, - repo_id=DATASET_REPO_ID, - repo_type="dataset", - token=ds_write_token, - ) - - print(commit_url) - -# Save predefined bias -def save_predefined_bias(filename: str, bias_json: dict): - global PREDEFINED_BIASES_DIR - bias_json['type'] = 'predefined' - save_bias(filename, PREDEFINED_BIASES_DIR, bias_json) - -# Save custom bias -def save_custom_bias(filename: str, bias_json: dict): - global CUSTOM_BIASES_DIR - bias_json['type'] = 'custom' - save_bias(filename, CUSTOM_BIASES_DIR, bias_json) - -################## -## BIAS LOADING ## -################## -def isCustomBias(bias_filename): - global CUSTOM_BIAS_SPECS - - if bias_filename.replace(".json","") in CUSTOM_BIAS_SPECS: - return True - else: - return False - -def retrieveSavedBiases(): - global DATASET_REPO_ID - - # Listing the files - https://huggingface.co/docs/huggingface_hub/v0.8.1/en/package_reference/hf_api - repo_files = list_repo_files(repo_id=DATASET_REPO_ID, repo_type="dataset") - - return repo_files - -def retrieveCustomBiases(): - files = retrieveSavedBiases() - flt_files = [f for f in files if CUSTOM_BIASES_DIR in f] - - return flt_files - -def retrievePredefinedBiases(): - files = retrieveSavedBiases() - flt_files = [f for f in files if PREDEFINED_BIASES_DIR in f] - - return flt_files - -# https://huggingface.co/spaces/elonmuskceo/persistent-data/blob/main/app.py -def get_bias_json(filepath: str): - filename = os.path.basename(filepath) - print(f"File path: {filepath} -> {filename}") - try: - hf_hub_download( - force_download=True, # to get updates of the dataset - repo_type="dataset", - repo_id=DATASET_REPO_ID, - filename=filepath, - cache_dir=LOCAL_DATA_DIRNAME, - force_filename=filename - ) - except Exception as e: - # file not found - print(f"file not found, probably: {e}") - - with open(os.path.join(LOCAL_DATA_DIRNAME, filename)) as f: - bias_json = json.load(f) - - return bias_json - -# Get custom bias spec by name -def loadCustomBiasSpec(filename: str): - global CUSTOM_BIASES_DIR, CUSTOM_BIAS_SPECS - #return get_bias_json(os.path.join(CUSTOM_BIASES_DIR, filename)) - return CUSTOM_BIAS_SPECS[filename.replace(".json","")] - -# Get predefined bias spec by name -def loadPredefinedBiasSpec(filename: str): - global PREDEFINED_BIASES_DIR - return get_bias_json(os.path.join(PREDEFINED_BIASES_DIR, filename)) - -################################## -## EXTRACT TERMS FROM BIAS SPEC ## -################################## - -# Get Social Group Terms -def getSocialGroupTerms(bias_spec): - return [list(bias_spec['social_groups'].items())[0][1], - list(bias_spec['social_groups'].items())[1][1], - ] - -# Get Attribute Terms -def getAttributeTerms(bias_spec): - return [list(bias_spec['attributes'].items())[0][1], - list(bias_spec['attributes'].items())[1][1], - ] - -if __name__ == '__main__': - print("ds_write_token:", ds_write_token) - print("hf_token:", HF_TOKEN!=None) - print("hfh_verssion", huggingface_hub.__version__) - - bias_specs = { - "male_female__math_arts":{ - "name": "Math_Arts_vs_Male_Female", - "social_groups": { - "Male_terms": ["male", "man", "boy", "brother", "he", "him", "his", "son"], - "Female_terms": ["female", "woman", "girl", "sister", "she", "her", "hers", "daughter"] - }, - "attributes": { - "Math": ["math", "algebra", "geometry", "calculus", "equations", "computation", "numbers", "addition"], - "Arts": ["poetry", "art", "dance", "literature", "novel", "symphony", "drama", "sculpture"] - }, - "templates": ["[T] likes [A]", "[T] like [A]", "[T] is interested in [A]"], - "sentences": [], - "source": "Caliskan - Weat 7", - "url": "CaliskanEtAl_authors_full.pdf" - - }, - "male_female__science_arts": { - "name": "Science_Arts_vs_Male_Female", - "social_groups": { - "Male_terms": ["brother", "father", "uncle", "grandfather", "son", "he", "his", "him"], - "Female_terms": ["sister", "mother", "aunt", "grandmother", "daughter", "she", "hers", "her"] - }, - "attributes": { - "Science": ["science", "technology", "physics", "chemistry", "Einstein", "NASA", "experiment", "astronomy"], - "Arts": ["poetry", "art", "Shakespeare", "dance", "literature", "novel", "symphony", "drama"] - }, - "templates": ["[T] likes [A]", "[T] like [A]", "[T] is interested in [A]"], - "sentences": [], - "source": "Caliskan - Weat 8", - "url": "CaliskanEtAl_authors_full.pdf" - }, - "flowers_insects__pleasant_unpleasant": { - "name": "Flowers_Insects_vs_Pleasant_Unpleasant", - "social_groups": { - "Flowers": ["aster", "clover", "hyacinth", "marigold", "poppy", "azalea", "crocus","iris", "orchid", "rose", - "bluebell", "daffodil", "lilac", "pansy", "tulip", "buttercup", "daisy", "lily", "peony", "violet", - "carnation", "gladiola", "magnolia", "petunia", "zinnia"], - "Insects": ["ant", "caterpillar", "flea", "locust", "spider", "bedbug", "centipede", "fly", "maggot", - "tarantula", "bee", "cockroach", "gnat", "mosquito", "termite", "beetle", "cricket", "hornet", "moth", - "wasp", "blackfly", "dragonfly", "horsefly", "roach", "weevil"] - }, - "attributes": { - "Pleasant_terms": ["caress", "freedom", "health", "love", "peace", "cheer", "friend", "heaven", "loyal", - "pleasure", "diamond", "gentle", "honest", "lucky", "rainbow", "diploma", "gift", "honor", "miracle", - "sunrise", "family", "happy", "laughter", "paradise", "vacation"], - "Unpleasant_terms": ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", - "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "divorce", "jail", "poverty", "ugly", - "cancer", "kill", "rotten", "vomit", "agony", "prison"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan - Weat 1", - "url": "CaliskanEtAl_authors_full.pdf" - }, - "instruments_weapons__pleasant_unpleasant": { - "name": "Instruments_Weapons_vs_Pleasant_Unpleasant", - "social_groups": { - "Instruments": ["bagpipe", "cello", "guitar", "lute", "trombone", "banjo", "clarinet", - "harmonica", "mandolin", "trumpet", "bassoon", "drum", "harp", "oboe", "tuba", - "bell", "fiddle", "harpsichord", "piano", "viola", "bongo", "flute", "horn", - "saxophone", "violin"], - "Weapons": ["arrow", "club", "gun", "missile", "spear", "ax", "dagger", "harpoon", - "pistol", "sword", "blade", "dynamite", "hatchet", "rifle", "tank", "bomb", - "firearm", "knife", "shotgun", "teargas", "cannon", "grenade", "mace", "slingshot", - "whip"] - }, - "attributes": { - "Pleasant_terms": ["caress", "freedom", "health", "love", "peace", "cheer", "friend", - "heaven", "loyal", "pleasure", "diamond", "gentle", "honest", "lucky", "rainbow", - "diploma", "gift", "honor", "miracle", "sunrise", "family", "happy", "laughter", - "paradise", "vacation"], - "Unpleasant_terms": ["abuse", "crash", "filth", "murder", "sickness", "accident", - "death", "grief", "poison", "stink", "assault", "disaster", "hatred", "pollute", - "tragedy", "divorce", "jail", "poverty", "ugly", "cancer", "kill", "rotten", - "vomit", "agony", "prison"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan - Weat 2", - "url": "CaliskanEtAl_authors_full.pdf" - }, - "eur_am_names_afr_am_names__pleasant_unpleasant_1": { - "name": "Eur-AmericanNames_Afr-AmericanNames_vs_Pleasant_Unpleasant_1", - "social_groups": { - "European_AmericanNames": ["Adam", "Harry", "Josh", "Roger", "Alan", "Frank", "Justin", "Ryan", "Andrew", "Jack", - "Matthew", "Stephen", "Brad", "Greg", "Paul", "Jonathan", "Peter", "Amanda", "Courtney", "Heather", "Melanie", - "Katie", "Betsy", "Kristin", "Nancy", "Stephanie", "Ellen", "Lauren", "Peggy", "Colleen", "Emily", "Megan", - "Rachel"], - "African_AmericanNames": ["Alonzo", "Jamel", "Theo", "Alphonse", "Jerome", "Leroy", "Torrance", "Darnell", "Lamar", - "Lionel", "Tyree", "Deion", "Lamont", "Malik", "Terrence", "Tyrone", "Lavon", "Marcellus", "Wardell", "Nichelle", - "Shereen", "Temeka", "Ebony", "Latisha", "Shaniqua", "Jasmine", "Tanisha", "Tia", "Lakisha", "Latoya", "Yolanda", - "Malika", "Yvette"] - }, - "attributes": { - "Pleasant_terms": ["caress", "freedom", "health", "love", "peace", "cheer", "friend", "heaven", "loyal", - "pleasure", "diamond", "gentle", "honest", "lucky", "rainbow", "diploma", "gift", "honor", "miracle", - "sunrise", "family", "happy", "laughter", "paradise", "vacation"], - "Unpleasant_terms": ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", - "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "divorce", "jail", "poverty", "ugly", - "cancer", "kill", "rotten", "vomit", "agony", "prison"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan - Weat 3", - "url": "CaliskanEtAl_authors_full.pdf" - }, - "eur_am_names_afr_am_names__pleasant_unpleasant_2": { - "name": "Eur_AmericanNames_Afr_AmericanNames_vs_Pleasant_Unpleasant_2", - "social_groups": { - "Eur_AmericanNames_reduced": ["Brad", "Brendan", "Geoffrey", "Greg", "Brett", "Matthew", "Neil", "Todd", "Allison", - "Anne", "Carrie", "Emily", "Jill", "Laurie", "Meredith", "Sarah"], - "Afr_AmericanNames_reduced": ["Darnell", "Hakim", "Jermaine", "Kareem", "Jamal", "Leroy", "Rasheed", - "Tyrone", "Aisha", "Ebony", "Keisha", "Kenya", "Lakisha", "Latoya", "Tamika", "Tanisha"] - }, - "attributes": { - "Pleasant_terms": ["caress", "freedom", "health", "love", "peace", "cheer", "friend", "heaven", "loyal", - "pleasure", "diamond", "gentle", "honest", "lucky", "rainbow", "diploma", "gift", "honor", "miracle", - "sunrise", "family", "happy", "laughter", "paradise", "vacation"], - "Unpleasant_terms": ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", - "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "divorce", "jail", "poverty", "ugly", - "cancer", "kill", "rotten", "vomit", "agony", "prison"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan - Weat 4", - "url": "CaliskanEtAl_authors_full.pdf", - }, - "eur_am_names_afr_am_names__pleasant_unpleasant_3": { - "name": "Eur-AmericanNames_Afr-AmericanNames_vs_Pleasant_Unpleasant_3", - "social_groups": { - "Eur-AmericanNames_reduced": ["Brad", "Brendan", "Geoffrey", "Greg", "Brett", "Matthew", "Neil", "Todd", "Allison", - "Anne", "Carrie", "Emily", "Jill", "Laurie", "Meredith", "Sarah"], - "Afr-AmericanNames_reduced": ["Darnell", "Hakim", "Jermaine", "Kareem", "Jamal", "Leroy", "Rasheed", - "Tyrone", "Aisha", "Ebony", "Keisha", "Kenya", "Lakisha", "Latoya", "Tamika", "Tanisha"] - }, - "attributes": { - "Pleasant_terms": ["joy", "love", "peace", "wonderful", "pleasure", "friend", "laughter", "happy"], - "Unpleasant_terms": ["agony", "terrible", "horrible", "nasty", "evil", "war", "awful", "failure"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan'17 - Weat 5", - "url": "https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf" - }, - "male_female__career_family": { - "name": "Career_Family_vs_MaleName_FemaleName", - "social_groups": { - "Male_Names": ["John", "Paul", "Mike", "Kevin", "Steve", "Greg", "Jeff", "Bill"], - "Female_Names": ["Amy", "Joan", "Lisa", "Sarah", "Diana", "Kate", "Ann", "Donna"] - }, - "attributes": { - "Career": ["executive", "management", "professional", "corporation", "salary", "office", "business", "career"], - "Family": ["home", "parents", "children", "family", "cousins", "marriage", "wedding", "relatives"] - }, - "templates": ["[T] likes [A]", "[T] like [A]", "[T] is interested in [A]"], - "sentences": [], - "source": "Caliskan'17 - Weat 6", - "url": "https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf" - }, - "mental_physial_disease__temporary_permanent": { - "name": "MentalDisease_PhysicalDisease_vs_Temporary_Permanent", - "social_groups": { - "MentalDisease": ["sad", "hopeless", "gloomy", "tearful", "miserable", "depressed"], - "PhysicalDisease": ["sick", "illness", "influenza", "disease", "virus", "cancer"] - }, - "attributes": { - "Temporary": ["impermanent", "unstable", "variable", "fleeting", "short-term", "brief", "occasional"], - "Permanent": ["stable", "always", "constant", "persistent", "chronic", "prolonged", "forever"] - }, - "templates": ["[T] are [A]", "[T] is [A]"], - "sentences": [], - "source": "Caliskan'17 - Weat 9", - "url": "https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf" - }, - "young_old__pleasant_unpleasant": { - "name": "YoungName_OldName_vs_Pleasant_Unpleasant", - "social_groups": { - "YoungName": ["Tiffany", "Michelle", "Cindy", "Kristy", "Brad", "Eric", "Joey", "Billy"], - "OldName": ["Ethel", "Bernice", "Gertrude", "Agnes", "Cecil", "Wilbert", "Mortimer", "Edgar"] - }, - "attributes": { - "Pleasant_terms": ["joy", "love", "peace", "wonderful", "pleasure", "friend", "laughter", "happy"], - "Unpleasant_terms": ["agony", "terrible", "horrible", "nasty", "evil", "war", "awful", "failure"] - }, - "templates": ["[T] likes [A]", "[T] like [A]", "[T] is interested in [A]"], - "sentences": [], - "source": "Caliskan'17 - Weat 10", - "url": "https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf" - }, - "male_female__profession": { - "name": "Professions_vs_Gender", - "social_groups": { - "Male_terms": ["he", "man", "brother", "son", "husband", "boyfriend", "father", "uncle", "dad"], - "Female_terms": ["she", "woman", "sister", "daughter", "wife", "girlfriend", "mother", "aunt", "mom"] - }, - "attributes": { - "male_professions": ["taper", "steel worker", "mobile equipment mechanic", "bus mechanic", - "service technician", "heating mechanic", "electrical installer", "operating engineer", - "logging worker", "floor installer", "roofer", "mining machine operator", "electrician", - "repairer", "conductor", "plumber", "carpenter", "security system installer", "mason", - "firefighter"], - "female_professions": ["kindergarten teacher", "dental hygienist", "speech-language pathologist", - "dental assistant", "childcare worker", "medical records technician", "secretary", - "medical assistant", "hairdresser", "dietitian", "vocational nurse", "teacher assistant", - "paralegal", "billing clerk", "phlebotomist", "receptionist", "housekeeper", - "registered nurse", "bookkeeper", "health aide"], - "balanced_professions": ["salesperson", "director of religious activities", "crossing guard", - "photographer", "lifeguard", "lodging manager", "healthcare practitioner", "sales agent", - "mail clerk", "electrical assembler", "insurance sales agent", "insurance underwriter", - "medical scientist", "statistician", "training specialist", "judge", "bartender", - "dispatcher", "order clerk", "mail sorter"] - }, - "templates": ["[T] is a [A].", "[T] works as a [A].", "[T] applied for the position of [A]", - "[T], the [A], had a good day at work.", "[T] wants to become a [A]."], - "sentences": [], - "source": "Bartl '20 simplified professions", - "url": "https://arxiv.org/pdf/2010.14534.pdf" - }, - "african_female_european_male__intersectional": { - "name": "African_Female_European_Male_intersectional", - "social_groups": { - "african_female_names": ["Yvette", "Aisha", "Malika", "Latisha", "Keisha", "Tanisha", - "Tamika", "Yolanda", "Nichelle", "Latoya", "Lakisha", "Shereen"], - "european_male_names": [ "Frank", "Roger", "Neil", "Geoffrey", "Brad", "Stephen", - "Peter", "Josh", "Andrew", "Jack", "Matthew", "Jonathan"] - }, - "attributes": { - "af_bias": ["loud", "ghetto", "unrefined", "athletic", "unintelligent", "bigbutt", - "overweight", "confident", "darkskinned", "promiscuous", "unfeminine", - "aggressive", "chicken"], - "em_bias_foraf": ["rich", "intelligent", "arrogant", "status", "blond", "racist", - "American", "leader", "privileged", "attractive", "tall", "sexist", "successful"] - }, - "templates": ["[T] is [A]"], - "sentences": [], - "source": "Guo '21", - "url": "https://dl.,acm.org/doi/pdf/10.1145/3461702.3462536" - }, - "african_female_european_male__emergent_intersectional": { - "name": "African_Female_European_Male_vs_AFemergent_EMintersectional", - "social_groups": { - "african_female_names": ["Yvette", "Aisha", "Malika", "Latisha", "Keisha", "Tanisha", - "Tamika", "Yolanda", "Nichelle", "Latoya", "Lakisha", "Shereen"], - "european_male_names": [ "Frank", "Roger", "Neil", "Geoffrey", "Brad", "Stephen", - "Peter", "Josh", "Andrew", "Jack", "Matthew", "Jonathan"] - }, - "attributes": { - "af_unique_bias": ["bigbutt", "overweight", "confident", "darkskinned", "promiscuous", - "unfeminine", "aggressive", "chicken"], - "em_unique_bias_foraf": ["rich", "tall", "intelligent", "arrogant", "successful", - "status", "blond", "racist"] - }, - "templates": ["[T] is [A]"], - "sentences": [], - "source": "Guo '21", - "url": "https://dl.acm.org/doi/pdf/10.1145/3461702.3462536" - }, - "mexican_female_european_male__intersectional": { - "name": "Mexican_Female_European_Male_intersectional", - "social_groups": { - "mexican_female_names": ["Maria", "Yesenia", "Adriana", "Liset", "Mayra", "Alma", - "Carolina", "Iliana", "Sonia", "Karina", "Alejandra", "Brenda"], - "european_male_names": [ "Frank", "Roger", "Neil", "Geoffrey", "Brad", "Stephen", - "Peter", "Josh", "Andrew", "Jack", "Matthew", "Jonathan"] - }, - "attributes": { - "lf_bias": ["feisty", "curvy", "loud", "cook", "darkskinned", "uneducated", - "hardworker", "promiscuous", "unintelligent", "short", "sexy", "maids"], - "em_bias_forlf": ["rich", "intelligent", "arrogant", "status", "blond", "racist", - "American", "leader", "privileged", "tall", "sexist", "successful"] - }, - "templates": ["[T] is [A]"], - "sentences": [], - "source": "Guo '21", - "url": "https://dl.acm.org/doi/pdf/10.1145/3461702.3462536" - }, - "mexican_female_european_male__emergent_intersectional": { - "name": "Mexican_Female_European_Male_vs_MFemergent_EMintersectional", - "social_groups": { - "mexican_female_names": ["Maria", "Yesenia", "Adriana", "Liset", "Mayra", "Alma", - "Carolina", "Iliana", "Sonia", "Karina", "Alejandra", "Brenda"], - "european_male_names": [ "Frank", "Roger", "Neil", "Geoffrey", "Brad", "Stephen", - "Peter", "Josh", "Andrew", "Jack", "Matthew", "Jonathan"] - }, - "attributes": { - "lf_unique_bias": ["feisty", "curvy", "cook", "promiscuous", "sexy", "maids"], - "em_unique_bias_forlf": ["rich", "tall", "intelligent", "assertive", "arrogant", - "successful"] - }, - "templates": ["[T] is [A]"], - "sentences": [], - "source": "Guo '21", - "url": "https://dl.acm.org/doi/pdf/10.1145/3461702.3462536" - } - } - - for save_name, spec_json in bias_specs.items(): - save_predefined_bias(f"{save_name}.json", spec_json) - - #save_custom_bias("male_female__math_arts.json", bias_spec_json) - - #custom_biases = retrieveCustomBiases() - #predefined_biases = retrievePredefinedBiases() - - #print(f"Custom biases: {custom_biases}") - #print(f"Predefined biases: {predefined_biases}") - - #bias_json = get_bias_json(custom_biases[0]) - #bias_json = loadCustomBiasSpec("male_female__math_arts.json") - #print(f"Loaded bias: \n {json.dumps(bias_json)}") #, sort_keys=True, indent=2)}") - - #print(f"Social group terms: {getSocialGroupTerms(bias_json)}") - #print(f"Attribute terms: {getAttributeTerms(bias_json)}") - - - - - - diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/norm.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/norm.py deleted file mode 100644 index 408f4b42731b19a3beeef68b6a5e610d0bbc18b3..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/norm.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import inspect - -import torch.nn as nn - -from annotator.uniformer.mmcv.utils import is_tuple_of -from annotator.uniformer.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm -from .registry import NORM_LAYERS - -NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d) -NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d) -NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d) -NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d) -NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm) -NORM_LAYERS.register_module('GN', module=nn.GroupNorm) -NORM_LAYERS.register_module('LN', module=nn.LayerNorm) -NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d) -NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d) -NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d) -NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d) - - -def infer_abbr(class_type): - """Infer abbreviation from the class name. - - When we build a norm layer with `build_norm_layer()`, we want to preserve - the norm type in variable names, e.g, self.bn1, self.gn. This method will - infer the abbreviation to map class types to abbreviations. - - Rule 1: If the class has the property "_abbr_", return the property. - Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or - InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and - "in" respectively. - Rule 3: If the class name contains "batch", "group", "layer" or "instance", - the abbreviation of this layer will be "bn", "gn", "ln" and "in" - respectively. - Rule 4: Otherwise, the abbreviation falls back to "norm". - - Args: - class_type (type): The norm layer type. - - Returns: - str: The inferred abbreviation. - """ - if not inspect.isclass(class_type): - raise TypeError( - f'class_type must be a type, but got {type(class_type)}') - if hasattr(class_type, '_abbr_'): - return class_type._abbr_ - if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN - return 'in' - elif issubclass(class_type, _BatchNorm): - return 'bn' - elif issubclass(class_type, nn.GroupNorm): - return 'gn' - elif issubclass(class_type, nn.LayerNorm): - return 'ln' - else: - class_name = class_type.__name__.lower() - if 'batch' in class_name: - return 'bn' - elif 'group' in class_name: - return 'gn' - elif 'layer' in class_name: - return 'ln' - elif 'instance' in class_name: - return 'in' - else: - return 'norm_layer' - - -def build_norm_layer(cfg, num_features, postfix=''): - """Build normalization layer. - - Args: - cfg (dict): The norm layer config, which should contain: - - - type (str): Layer type. - - layer args: Args needed to instantiate a norm layer. - - requires_grad (bool, optional): Whether stop gradient updates. - num_features (int): Number of input channels. - postfix (int | str): The postfix to be appended into norm abbreviation - to create named layer. - - Returns: - (str, nn.Module): The first element is the layer name consisting of - abbreviation and postfix, e.g., bn1, gn. The second element is the - created norm layer. - """ - if not isinstance(cfg, dict): - raise TypeError('cfg must be a dict') - if 'type' not in cfg: - raise KeyError('the cfg dict must contain the key "type"') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in NORM_LAYERS: - raise KeyError(f'Unrecognized norm type {layer_type}') - - norm_layer = NORM_LAYERS.get(layer_type) - abbr = infer_abbr(norm_layer) - - assert isinstance(postfix, (int, str)) - name = abbr + str(postfix) - - requires_grad = cfg_.pop('requires_grad', True) - cfg_.setdefault('eps', 1e-5) - if layer_type != 'GN': - layer = norm_layer(num_features, **cfg_) - if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): - layer._specify_ddp_gpu_num(1) - else: - assert 'num_groups' in cfg_ - layer = norm_layer(num_channels=num_features, **cfg_) - - for param in layer.parameters(): - param.requires_grad = requires_grad - - return name, layer - - -def is_norm(layer, exclude=None): - """Check if a layer is a normalization layer. - - Args: - layer (nn.Module): The layer to be checked. - exclude (type | tuple[type]): Types to be excluded. - - Returns: - bool: Whether the layer is a norm layer. - """ - if exclude is not None: - if not isinstance(exclude, tuple): - exclude = (exclude, ) - if not is_tuple_of(exclude, type): - raise TypeError( - f'"exclude" must be either None or type or a tuple of types, ' - f'but got {type(exclude)}: {exclude}') - - if exclude and isinstance(layer, exclude): - return False - - all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm) - return isinstance(layer, all_norm_bases) diff --git a/spaces/AnonymousSub/Ayurveda4U/app.py b/spaces/AnonymousSub/Ayurveda4U/app.py deleted file mode 100644 index 5ff991d84d2b785e55236b2a0dc7625b104aad3e..0000000000000000000000000000000000000000 --- a/spaces/AnonymousSub/Ayurveda4U/app.py +++ /dev/null @@ -1,48 +0,0 @@ -from transformers import AutoModelForCausalLM, AutoTokenizer -import gradio as gr -import torch - - -title = "Ayurveda4U" -description = "LLM-Powered Medical Chatbot that will answer all your health-related queries with the help of Ayurvedic texts ynder the hood!" -examples = [["How can you cure common cold using Ayurveda?"], ["What is the Ayurvedic equivalent of Paracetamol?"]] - -model_path = 'tloen/alpaca-lora-7b' #'microsoft/phi-1_5'#'microsoft/DialoGPT-large' #'microsoft/biogpt' #'microsoft/BioGPT-large' #microsoft/DialoGPT-large - -tokenizer = AutoTokenizer.from_pretrained(model_path) -model = AutoModelForCausalLM.from_pretrained(model_path) - - -def predict(input, history=[]): - # tokenize the new input sentence - new_user_input_ids = tokenizer.encode( - input + tokenizer.eos_token, return_tensors="pt" - ) - - # append the new user input tokens to the chat history - bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1) - - # generate a response - history = model.generate( - bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id - ).tolist() - - # convert the tokens to text, and then split the responses into lines - response = tokenizer.decode(history[0]).split("<|endoftext|>") - # print('decoded_response-->>'+str(response)) - response = [ - (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2) - ] # convert to tuples of list - # print('response-->>'+str(response)) - return response, history - - -gr.Interface( - fn=predict, - title=title, - description=description, - examples=examples, - inputs=["text", "state"], - outputs=["chatbot", "state"], - theme="finlaymacklon/boxy_violet", -).launch() \ No newline at end of file diff --git a/spaces/AriaMei/TTSdemo/attentions.py b/spaces/AriaMei/TTSdemo/attentions.py deleted file mode 100644 index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000 --- a/spaces/AriaMei/TTSdemo/attentions.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/ArtGAN/Diffusion-API/app.py b/spaces/ArtGAN/Diffusion-API/app.py deleted file mode 100644 index 6d6e2eb083736e06f4e81d57adb87e25915ca65e..0000000000000000000000000000000000000000 --- a/spaces/ArtGAN/Diffusion-API/app.py +++ /dev/null @@ -1,48 +0,0 @@ -import gradio as gr - -from diffusion_webui import ( - StableDiffusionControlNetGenerator, - StableDiffusionControlNetInpaintGenerator, - StableDiffusionImage2ImageGenerator, - StableDiffusionInpaintGenerator, - StableDiffusionText2ImageGenerator, -) - - -def diffusion_app(): - app = gr.Blocks() - with app: - gr.HTML( - """ - - Stable Diffusion + ControlNet + Inpaint -- """ - ) - gr.HTML( - """ -- Follow me for more! - Twitter | Github | Linkedin -- """ - ) - with gr.Row(): - with gr.Column(): - with gr.Tab(label="Text2Image"): - StableDiffusionText2ImageGenerator.app() - with gr.Tab(label="Image2Image"): - StableDiffusionImage2ImageGenerator.app() - with gr.Tab(label="Inpaint"): - StableDiffusionInpaintGenerator.app() - with gr.Tab(label="Controlnet"): - StableDiffusionControlNetGenerator.app() - with gr.Tab(label="Controlnet Inpaint"): - StableDiffusionControlNetInpaintGenerator.app() - - app.queue(concurrency_count=1) - app.launch(debug=True, enable_queue=True) - - -if __name__ == "__main__": - diffusion_app() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/build_env.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/build_env.py deleted file mode 100644 index 4f704a3547da02f913d6cfdbd4e0ed77c81caabe..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/build_env.py +++ /dev/null @@ -1,311 +0,0 @@ -"""Build Environment used for isolation during sdist building -""" - -import logging -import os -import pathlib -import site -import sys -import textwrap -from collections import OrderedDict -from types import TracebackType -from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union - -from pip._vendor.certifi import where -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.version import Version - -from pip import __file__ as pip_location -from pip._internal.cli.spinners import open_spinner -from pip._internal.locations import get_platlib, get_purelib, get_scheme -from pip._internal.metadata import get_default_environment, get_environment -from pip._internal.utils.subprocess import call_subprocess -from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds - -if TYPE_CHECKING: - from pip._internal.index.package_finder import PackageFinder - -logger = logging.getLogger(__name__) - - -def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]: - return (a, b) if a != b else (a,) - - -class _Prefix: - def __init__(self, path: str) -> None: - self.path = path - self.setup = False - scheme = get_scheme("", prefix=path) - self.bin_dir = scheme.scripts - self.lib_dirs = _dedup(scheme.purelib, scheme.platlib) - - -def get_runnable_pip() -> str: - """Get a file to pass to a Python executable, to run the currently-running pip. - - This is used to run a pip subprocess, for installing requirements into the build - environment. - """ - source = pathlib.Path(pip_location).resolve().parent - - if not source.is_dir(): - # This would happen if someone is using pip from inside a zip file. In that - # case, we can use that directly. - return str(source) - - return os.fsdecode(source / "__pip-runner__.py") - - -def _get_system_sitepackages() -> Set[str]: - """Get system site packages - - Usually from site.getsitepackages, - but fallback on `get_purelib()/get_platlib()` if unavailable - (e.g. in a virtualenv created by virtualenv<20) - - Returns normalized set of strings. - """ - if hasattr(site, "getsitepackages"): - system_sites = site.getsitepackages() - else: - # virtualenv < 20 overwrites site.py without getsitepackages - # fallback on get_purelib/get_platlib. - # this is known to miss things, but shouldn't in the cases - # where getsitepackages() has been removed (inside a virtualenv) - system_sites = [get_purelib(), get_platlib()] - return {os.path.normcase(path) for path in system_sites} - - -class BuildEnvironment: - """Creates and manages an isolated environment to install build deps""" - - def __init__(self) -> None: - temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True) - - self._prefixes = OrderedDict( - (name, _Prefix(os.path.join(temp_dir.path, name))) - for name in ("normal", "overlay") - ) - - self._bin_dirs: List[str] = [] - self._lib_dirs: List[str] = [] - for prefix in reversed(list(self._prefixes.values())): - self._bin_dirs.append(prefix.bin_dir) - self._lib_dirs.extend(prefix.lib_dirs) - - # Customize site to: - # - ensure .pth files are honored - # - prevent access to system site packages - system_sites = _get_system_sitepackages() - - self._site_dir = os.path.join(temp_dir.path, "site") - if not os.path.exists(self._site_dir): - os.mkdir(self._site_dir) - with open( - os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8" - ) as fp: - fp.write( - textwrap.dedent( - """ - import os, site, sys - - # First, drop system-sites related paths. - original_sys_path = sys.path[:] - known_paths = set() - for path in {system_sites!r}: - site.addsitedir(path, known_paths=known_paths) - system_paths = set( - os.path.normcase(path) - for path in sys.path[len(original_sys_path):] - ) - original_sys_path = [ - path for path in original_sys_path - if os.path.normcase(path) not in system_paths - ] - sys.path = original_sys_path - - # Second, add lib directories. - # ensuring .pth file are processed. - for path in {lib_dirs!r}: - assert not path in sys.path - site.addsitedir(path) - """ - ).format(system_sites=system_sites, lib_dirs=self._lib_dirs) - ) - - def __enter__(self) -> None: - self._save_env = { - name: os.environ.get(name, None) - for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH") - } - - path = self._bin_dirs[:] - old_path = self._save_env["PATH"] - if old_path: - path.extend(old_path.split(os.pathsep)) - - pythonpath = [self._site_dir] - - os.environ.update( - { - "PATH": os.pathsep.join(path), - "PYTHONNOUSERSITE": "1", - "PYTHONPATH": os.pathsep.join(pythonpath), - } - ) - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - for varname, old_value in self._save_env.items(): - if old_value is None: - os.environ.pop(varname, None) - else: - os.environ[varname] = old_value - - def check_requirements( - self, reqs: Iterable[str] - ) -> Tuple[Set[Tuple[str, str]], Set[str]]: - """Return 2 sets: - - conflicting requirements: set of (installed, wanted) reqs tuples - - missing requirements: set of reqs - """ - missing = set() - conflicting = set() - if reqs: - env = ( - get_environment(self._lib_dirs) - if hasattr(self, "_lib_dirs") - else get_default_environment() - ) - for req_str in reqs: - req = Requirement(req_str) - # We're explicitly evaluating with an empty extra value, since build - # environments are not provided any mechanism to select specific extras. - if req.marker is not None and not req.marker.evaluate({"extra": ""}): - continue - dist = env.get_distribution(req.name) - if not dist: - missing.add(req_str) - continue - if isinstance(dist.version, Version): - installed_req_str = f"{req.name}=={dist.version}" - else: - installed_req_str = f"{req.name}==={dist.version}" - if not req.specifier.contains(dist.version, prereleases=True): - conflicting.add((installed_req_str, req_str)) - # FIXME: Consider direct URL? - return conflicting, missing - - def install_requirements( - self, - finder: "PackageFinder", - requirements: Iterable[str], - prefix_as_string: str, - *, - kind: str, - ) -> None: - prefix = self._prefixes[prefix_as_string] - assert not prefix.setup - prefix.setup = True - if not requirements: - return - self._install_requirements( - get_runnable_pip(), - finder, - requirements, - prefix, - kind=kind, - ) - - @staticmethod - def _install_requirements( - pip_runnable: str, - finder: "PackageFinder", - requirements: Iterable[str], - prefix: _Prefix, - *, - kind: str, - ) -> None: - args: List[str] = [ - sys.executable, - pip_runnable, - "install", - "--ignore-installed", - "--no-user", - "--prefix", - prefix.path, - "--no-warn-script-location", - ] - if logger.getEffectiveLevel() <= logging.DEBUG: - args.append("-v") - for format_control in ("no_binary", "only_binary"): - formats = getattr(finder.format_control, format_control) - args.extend( - ( - "--" + format_control.replace("_", "-"), - ",".join(sorted(formats or {":none:"})), - ) - ) - - index_urls = finder.index_urls - if index_urls: - args.extend(["-i", index_urls[0]]) - for extra_index in index_urls[1:]: - args.extend(["--extra-index-url", extra_index]) - else: - args.append("--no-index") - for link in finder.find_links: - args.extend(["--find-links", link]) - - for host in finder.trusted_hosts: - args.extend(["--trusted-host", host]) - if finder.allow_all_prereleases: - args.append("--pre") - if finder.prefer_binary: - args.append("--prefer-binary") - args.append("--") - args.extend(requirements) - extra_environ = {"_PIP_STANDALONE_CERT": where()} - with open_spinner(f"Installing {kind}") as spinner: - call_subprocess( - args, - command_desc=f"pip subprocess to install {kind}", - spinner=spinner, - extra_environ=extra_environ, - ) - - -class NoOpBuildEnvironment(BuildEnvironment): - """A no-op drop-in replacement for BuildEnvironment""" - - def __init__(self) -> None: - pass - - def __enter__(self) -> None: - pass - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - def cleanup(self) -> None: - pass - - def install_requirements( - self, - finder: "PackageFinder", - requirements: Iterable[str], - prefix_as_string: str, - *, - kind: str, - ) -> None: - raise NotImplementedError() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/database.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/database.py deleted file mode 100644 index 5db5d7f507c1d150e6b36f236df7ee61c0f65581..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/database.py +++ /dev/null @@ -1,1350 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2017 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""PEP 376 implementation.""" - -from __future__ import unicode_literals - -import base64 -import codecs -import contextlib -import hashlib -import logging -import os -import posixpath -import sys -import zipimport - -from . import DistlibException, resources -from .compat import StringIO -from .version import get_scheme, UnsupportedVersionError -from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME) -from .util import (parse_requirement, cached_property, parse_name_and_version, - read_exports, write_exports, CSVReader, CSVWriter) - - -__all__ = ['Distribution', 'BaseInstalledDistribution', - 'InstalledDistribution', 'EggInfoDistribution', - 'DistributionPath'] - - -logger = logging.getLogger(__name__) - -EXPORTS_FILENAME = 'pydist-exports.json' -COMMANDS_FILENAME = 'pydist-commands.json' - -DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', - 'RESOURCES', EXPORTS_FILENAME, 'SHARED') - -DISTINFO_EXT = '.dist-info' - - -class _Cache(object): - """ - A simple cache mapping names and .dist-info paths to distributions - """ - def __init__(self): - """ - Initialise an instance. There is normally one for each DistributionPath. - """ - self.name = {} - self.path = {} - self.generated = False - - def clear(self): - """ - Clear the cache, setting it to its initial state. - """ - self.name.clear() - self.path.clear() - self.generated = False - - def add(self, dist): - """ - Add a distribution to the cache. - :param dist: The distribution to add. - """ - if dist.path not in self.path: - self.path[dist.path] = dist - self.name.setdefault(dist.key, []).append(dist) - - -class DistributionPath(object): - """ - Represents a set of distributions installed on a path (typically sys.path). - """ - def __init__(self, path=None, include_egg=False): - """ - Create an instance from a path, optionally including legacy (distutils/ - setuptools/distribute) distributions. - :param path: The path to use, as a list of directories. If not specified, - sys.path is used. - :param include_egg: If True, this instance will look for and return legacy - distributions as well as those based on PEP 376. - """ - if path is None: - path = sys.path - self.path = path - self._include_dist = True - self._include_egg = include_egg - - self._cache = _Cache() - self._cache_egg = _Cache() - self._cache_enabled = True - self._scheme = get_scheme('default') - - def _get_cache_enabled(self): - return self._cache_enabled - - def _set_cache_enabled(self, value): - self._cache_enabled = value - - cache_enabled = property(_get_cache_enabled, _set_cache_enabled) - - def clear_cache(self): - """ - Clears the internal cache. - """ - self._cache.clear() - self._cache_egg.clear() - - - def _yield_distributions(self): - """ - Yield .dist-info and/or .egg(-info) distributions. - """ - # We need to check if we've seen some resources already, because on - # some Linux systems (e.g. some Debian/Ubuntu variants) there are - # symlinks which alias other files in the environment. - seen = set() - for path in self.path: - finder = resources.finder_for_path(path) - if finder is None: - continue - r = finder.find('') - if not r or not r.is_container: - continue - rset = sorted(r.resources) - for entry in rset: - r = finder.find(entry) - if not r or r.path in seen: - continue - try: - if self._include_dist and entry.endswith(DISTINFO_EXT): - possible_filenames = [METADATA_FILENAME, - WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME] - for metadata_filename in possible_filenames: - metadata_path = posixpath.join(entry, metadata_filename) - pydist = finder.find(metadata_path) - if pydist: - break - else: - continue - - with contextlib.closing(pydist.as_stream()) as stream: - metadata = Metadata(fileobj=stream, scheme='legacy') - logger.debug('Found %s', r.path) - seen.add(r.path) - yield new_dist_class(r.path, metadata=metadata, - env=self) - elif self._include_egg and entry.endswith(('.egg-info', - '.egg')): - logger.debug('Found %s', r.path) - seen.add(r.path) - yield old_dist_class(r.path, self) - except Exception as e: - msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s' - logger.warning(msg, r.path, e) - import warnings - warnings.warn(msg % (r.path, e), stacklevel=2) - - def _generate_cache(self): - """ - Scan the path for distributions and populate the cache with - those that are found. - """ - gen_dist = not self._cache.generated - gen_egg = self._include_egg and not self._cache_egg.generated - if gen_dist or gen_egg: - for dist in self._yield_distributions(): - if isinstance(dist, InstalledDistribution): - self._cache.add(dist) - else: - self._cache_egg.add(dist) - - if gen_dist: - self._cache.generated = True - if gen_egg: - self._cache_egg.generated = True - - @classmethod - def distinfo_dirname(cls, name, version): - """ - The *name* and *version* parameters are converted into their - filename-escaped form, i.e. any ``'-'`` characters are replaced - with ``'_'`` other than the one in ``'dist-info'`` and the one - separating the name from the version number. - - :parameter name: is converted to a standard distribution name by replacing - any runs of non- alphanumeric characters with a single - ``'-'``. - :type name: string - :parameter version: is converted to a standard version string. Spaces - become dots, and all other non-alphanumeric characters - (except dots) become dashes, with runs of multiple - dashes condensed to a single dash. - :type version: string - :returns: directory name - :rtype: string""" - name = name.replace('-', '_') - return '-'.join([name, version]) + DISTINFO_EXT - - def get_distributions(self): - """ - Provides an iterator that looks for distributions and returns - :class:`InstalledDistribution` or - :class:`EggInfoDistribution` instances for each one of them. - - :rtype: iterator of :class:`InstalledDistribution` and - :class:`EggInfoDistribution` instances - """ - if not self._cache_enabled: - for dist in self._yield_distributions(): - yield dist - else: - self._generate_cache() - - for dist in self._cache.path.values(): - yield dist - - if self._include_egg: - for dist in self._cache_egg.path.values(): - yield dist - - def get_distribution(self, name): - """ - Looks for a named distribution on the path. - - This function only returns the first result found, as no more than one - value is expected. If nothing is found, ``None`` is returned. - - :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` - or ``None`` - """ - result = None - name = name.lower() - if not self._cache_enabled: - for dist in self._yield_distributions(): - if dist.key == name: - result = dist - break - else: - self._generate_cache() - - if name in self._cache.name: - result = self._cache.name[name][0] - elif self._include_egg and name in self._cache_egg.name: - result = self._cache_egg.name[name][0] - return result - - def provides_distribution(self, name, version=None): - """ - Iterates over all distributions to find which distributions provide *name*. - If a *version* is provided, it will be used to filter the results. - - This function only returns the first result found, since no more than - one values are expected. If the directory is not found, returns ``None``. - - :parameter version: a version specifier that indicates the version - required, conforming to the format in ``PEP-345`` - - :type name: string - :type version: string - """ - matcher = None - if version is not None: - try: - matcher = self._scheme.matcher('%s (%s)' % (name, version)) - except ValueError: - raise DistlibException('invalid name or version: %r, %r' % - (name, version)) - - for dist in self.get_distributions(): - # We hit a problem on Travis where enum34 was installed and doesn't - # have a provides attribute ... - if not hasattr(dist, 'provides'): - logger.debug('No "provides": %s', dist) - else: - provided = dist.provides - - for p in provided: - p_name, p_ver = parse_name_and_version(p) - if matcher is None: - if p_name == name: - yield dist - break - else: - if p_name == name and matcher.match(p_ver): - yield dist - break - - def get_file_path(self, name, relative_path): - """ - Return the path to a resource file. - """ - dist = self.get_distribution(name) - if dist is None: - raise LookupError('no distribution named %r found' % name) - return dist.get_resource_path(relative_path) - - def get_exported_entries(self, category, name=None): - """ - Return all of the exported entries in a particular category. - - :param category: The category to search for entries. - :param name: If specified, only entries with that name are returned. - """ - for dist in self.get_distributions(): - r = dist.exports - if category in r: - d = r[category] - if name is not None: - if name in d: - yield d[name] - else: - for v in d.values(): - yield v - - -class Distribution(object): - """ - A base class for distributions, whether installed or from indexes. - Either way, it must have some metadata, so that's all that's needed - for construction. - """ - - build_time_dependency = False - """ - Set to True if it's known to be only a build-time dependency (i.e. - not needed after installation). - """ - - requested = False - """A boolean that indicates whether the ``REQUESTED`` metadata file is - present (in other words, whether the package was installed by user - request or it was installed as a dependency).""" - - def __init__(self, metadata): - """ - Initialise an instance. - :param metadata: The instance of :class:`Metadata` describing this - distribution. - """ - self.metadata = metadata - self.name = metadata.name - self.key = self.name.lower() # for case-insensitive comparisons - self.version = metadata.version - self.locator = None - self.digest = None - self.extras = None # additional features requested - self.context = None # environment marker overrides - self.download_urls = set() - self.digests = {} - - @property - def source_url(self): - """ - The source archive download URL for this distribution. - """ - return self.metadata.source_url - - download_url = source_url # Backward compatibility - - @property - def name_and_version(self): - """ - A utility property which displays the name and version in parentheses. - """ - return '%s (%s)' % (self.name, self.version) - - @property - def provides(self): - """ - A set of distribution names and versions provided by this distribution. - :return: A set of "name (version)" strings. - """ - plist = self.metadata.provides - s = '%s (%s)' % (self.name, self.version) - if s not in plist: - plist.append(s) - return plist - - def _get_requirements(self, req_attr): - md = self.metadata - reqts = getattr(md, req_attr) - logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, - reqts) - return set(md.get_requirements(reqts, extras=self.extras, - env=self.context)) - - @property - def run_requires(self): - return self._get_requirements('run_requires') - - @property - def meta_requires(self): - return self._get_requirements('meta_requires') - - @property - def build_requires(self): - return self._get_requirements('build_requires') - - @property - def test_requires(self): - return self._get_requirements('test_requires') - - @property - def dev_requires(self): - return self._get_requirements('dev_requires') - - def matches_requirement(self, req): - """ - Say if this instance matches (fulfills) a requirement. - :param req: The requirement to match. - :rtype req: str - :return: True if it matches, else False. - """ - # Requirement may contain extras - parse to lose those - # from what's passed to the matcher - r = parse_requirement(req) - scheme = get_scheme(self.metadata.scheme) - try: - matcher = scheme.matcher(r.requirement) - except UnsupportedVersionError: - # XXX compat-mode if cannot read the version - logger.warning('could not read version %r - using name only', - req) - name = req.split()[0] - matcher = scheme.matcher(name) - - name = matcher.key # case-insensitive - - result = False - for p in self.provides: - p_name, p_ver = parse_name_and_version(p) - if p_name != name: - continue - try: - result = matcher.match(p_ver) - break - except UnsupportedVersionError: - pass - return result - - def __repr__(self): - """ - Return a textual representation of this instance, - """ - if self.source_url: - suffix = ' [%s]' % self.source_url - else: - suffix = '' - return '- Cómo descargar Gacha Nox en Samsung-Gacha juegos son uno de los géneros más populares de juegos para móviles en el mundo. Permiten a los jugadores coleccionar y personalizar personajes, cartas y otros artículos de varias franquicias y temas. Uno de los juegos gacha más populares es Gacha Club, que permite a los jugadores crear sus propios personajes e historias utilizando cientos de opciones. -Sin embargo, si quieres llevar tu experiencia de juego gacha al siguiente nivel, es posible que quieras probar Gacha Nox, un mod de Gacha Club que ofrece aún más contenido y características. Y si tienes un dispositivo Samsung, puedes disfrutar jugando Gacha Nox en una pantalla más grande con mejor rendimiento y duración de la batería. -cómo descargar nox gacha en samsungDownload ✅ https://bltlly.com/2v6MkR - En este artículo, le mostraremos cómo descargar e instalar Gacha Nox en su dispositivo Samsung, así como algunos consejos y trucos para jugarlo. -¿Qué es Gacha Nox?-Gacha Nox es un mod de Gacha Club creado por Noxula, un fan del juego. Un mod es una modificación de un juego original que añade o cambia algunos aspectos del mismo. Gacha Nox añade cientos de contenidos nuevos y exclusivos a Gacha Club, como: -
Con Gacha Nox, puedes dar rienda suelta a tu creatividad e imaginación y crear tus propios personajes e historias. -¿Por qué es popular Gacha Nox?-Gacha Nox es popular entre los fanáticos del juego gacha porque ofrece más contenido y características que el Gacha Club original. También cuenta con una comunidad amigable y activa de jugadores que comparten sus creaciones y comentarios en plataformas de redes sociales, como YouTube, Instagram, TikTok, etc. - -¿Cuáles son los beneficios de jugar Gacha Nox en Samsung?-Jugar Gacha Nox en dispositivos Samsung tiene muchos beneficios, como: -
Jugar Gacha Nox en dispositivos Samsung puede mejorar su experiencia de juego y hacerlo más divertido y agradable. -Cómo descargar Gacha Nox en dispositivos Samsung-Ahora que sabes lo que es Gacha Nox y por qué es popular y beneficioso para jugar en dispositivos Samsung, vamos a ver cómo descargar e instalar. Es muy fácil y simple de hacer. Solo tienes que seguir estos pasos: -Paso 1: Descargar Gacha Nox APK Archivo-Lo primero que tienes que hacer es descargar el archivo APK Gacha Nox. Un archivo APK es un formato de archivo que contiene el paquete de instalación de una aplicación Android. Puede descargar el archivo Gacha Nox APK desde el sitio web oficial o una fuente de confianza. Asegúrese de elegir la versión que coincida con su dispositivo (32 bits o 64 bits). - -Para descargar el archivo Gacha Nox APK, vaya a https://gachanox.com/download/ o https://noxula.com/gachanox/. A continuación, haga clic en el botón de descarga de la versión que desee. El archivo comenzará a descargarse automáticamente. Puedes comprobar el progreso en la barra de notificaciones o en el gestor de descargas de tu navegador. -Paso 2: Habilitar fuentes desconocidas- -
Ahora ha habilitado fuentes desconocidas en la configuración de su dispositivo. Puede proceder al siguiente paso. -Paso 3: Instalar Gacha Nox APK Archivo-Lo último que debe hacer es instalar el archivo APK de Gacha Nox. Para instalar el archivo APK de Gacha Nox, siga estos pasos: -
Ahora ha instalado el archivo APK Gacha Nox en su dispositivo. Puede proceder al siguiente paso. -Paso 4: Lanza Gacha Nox y disfruta-Lo último que necesitas hacer es lanzar Gacha Nox y disfrutar jugando. Para lanzar Gacha Nox, sigue estos pasos: -
Ahora has lanzado Gacha Nox y puedes disfrutar jugando. Puedes crear y personalizar tus propios personajes e historias usando los cientos de contenidos nuevos y exclusivos que ofrece el mod. También puedes compartir tus creaciones y comentarios con otros jugadores en plataformas de redes sociales. - -Jugar Gacha Nox en dispositivos Samsung puede ser divertido y agradable, pero también puede ser desafiante y frustrante si no sabes algunos consejos y trucos. Aquí hay algunos consejos y trucos que pueden ayudarle a jugar Gacha Nox en dispositivos samsung mejor: -Utilice atajos de teclado para un juego más rápido y fácil-Uno de los consejos que puede ayudarle a jugar Gacha Nox en dispositivos Samsung más rápido y más fácil es utilizar atajos de teclado. Los atajos de teclado son combinaciones de teclas que realizan acciones comunes en el juego, como mover personajes, cambiar escenas, tomar capturas de pantalla y grabar videos. El uso de atajos de teclado puede ahorrarle tiempo y esfuerzo, así como hacer que su juego sea más suave y conveniente. -Aquí hay una tabla de algunos atajos de teclado que puedes usar en Gacha Nox: -Acción |
-Atajo de teclado |
-Mover el carácter a la izquierda |
-A |
-Mover el carácter a la derecha |
-D |
-Mover caracteres hacia arriba |
-W |
-Mover caracteres hacia abajo |
-S |
-Cambiar escena izquierda |
-Q |
-Cambiar escena derecha |
-E |
-Captura de pantalla |
-F12 |
-Grabar vídeo |
-F11 |
-Pausar/reanudar la grabación de video |
-F10 |
-Detener la grabación de vídeo |
-F9 |
-También puedes personalizar tus propios atajos de teclado en la configuración del juego si quieres. -Ajuste de los ajustes gráficos para un rendimiento óptimo y duración de la batería- -Aquí hay una tabla de algunos ajustes gráficos que puede ajustar en Gacha Nox y sus efectos: -Configuración de gráficos |
-Efecto |
-Resolución |
-El número de píxeles que componen la pantalla del juego. Mayor resolución significa imágenes más nítidas y claras, pero también más consumo de energía y menor rendimiento. |
-Velocidad de fotogramas |
-El número de marcos que se muestran por segundo. Mayor velocidad de fotogramas significa animaciones más fluidas y fluidas, pero también más consumo de energía y menor rendimiento. |
-Brillo |
-El nivel de ligereza u oscuridad de la pantalla del juego. Mayor brillo significa imágenes más brillantes y más visibles, pero también más consumo de energía y tensión ocular. |
-Contraste |
-El nivel de diferencia entre las partes más claras y oscuras de la pantalla del juego. Un mayor contraste significa imágenes más vívidas y coloridas, pero también más tensión ocular y distorsión. |
-Saturación |
-El nivel de intensidad o pureza de los colores en la pantalla del juego. Mayor saturación significa colores más vibrantes y ricos, pero también más tensión ocular y distorsión. |
-Hue |
-El nivel de cambio o cambio en los colores en la pantalla del juego. Un tono más alto significa colores más variados y diversos, pero también más tensión ocular y distorsión. |
-Anti-aliasing |
-El proceso de suavizar los bordes dentados o píxeles en la pantalla del juego. Mayor anti-aliasing significa imágenes más suaves y realistas, pero también más consumo de energía y menor rendimiento. |
-Calidad de la textura |
-El nivel de detalle o nitidez de las texturas en la pantalla del juego. Mayor calidad de textura significa imágenes más realistas e inmersivas, pero también más consumo de energía y menor rendimiento. |
-Calidad de sombra |
-
-Puede ajustar la configuración de gráficos en la configuración del juego utilizando los controles deslizantes o botones. También puede usar los presets para elegir la mejor configuración de gráficos para su dispositivo. -Copia de seguridad de sus datos regularmente para evitar perder el progreso-El último consejo que puede ayudarle a jugar Gacha Nox en dispositivos Samsung mejor es hacer copias de seguridad de sus datos con regularidad. Los datos son la información que se almacena en su dispositivo, como sus personajes, historias, capturas de pantalla, videos, etc. Hacer copias de seguridad de sus datos significa guardarlos o copiarlos en otra ubicación, como la nube o un dispositivo diferente. Hacer copias de seguridad de tus datos puede ayudarte a evitar perder tu progreso si algo le sucede a tu dispositivo, como daños, robo o mal funcionamiento. -Hay dos maneras de hacer copias de seguridad de sus datos en Gacha Nox: -
Aquí hay una tabla de algunas ubicaciones de carpetas de datos para diferentes dispositivos: -Dispositivo |
-Ubicación de la carpeta de datos |
-Samsung Galaxy S21 |
-/storage/emulated/0/Android/data/air.com.lunime.gachanox/files/GachaNox/ |
-Samsung Galaxy Tab S7 |
-
-Samsung Galaxy Note 20 |
-/storage/emulated/0/Android/data/air.com.lunime.gachanox/files/GachaNox/ |
-Samsung Galaxy A51 |
-/storage/emulated/0/Android/data/air.com.lunime.gachanox/files/GachaNox/ |
-Samsung Galaxy Z Fold 3 |
-/storage/emulated/0/Android/data/air.com.lunime.gachanox/files/GachaNox/ |
-Deberías hacer copias de seguridad de tus datos regularmente, especialmente antes de actualizar o desinstalar el juego, o cambiar dispositivos. De esta manera, puede restaurar sus datos y continuar jugando sin perder nada. -Conclusión-Gacha Nox es un mod de Gacha Club que ofrece cientos de contenido nuevo y exclusivo y características para los fanáticos del juego gacha. Es gratis para descargar y jugar, y tiene una comunidad de jugadores amigable y activa. Jugar Gacha Nox en dispositivos Samsung puede mejorar su experiencia de juego y hacerlo más divertido y agradable. -Para descargar e instalar Gacha Nox en dispositivos Samsung, debe seguir estos pasos: -
Para jugar Gacha Nox en dispositivos samsung mejor, puede utilizar estos consejos y trucos: -
Esperamos que este artículo le ayudó a aprender a descargar y jugar Gacha Nox en dispositivos Samsung. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Happy gacha gaming! -Preguntas frecuentes (preguntas frecuentes)-Q: ¿Es seguro descargar y jugar Gacha Nox?- -Q: ¿Cómo puedo actualizar Gacha Nox a la última versión?-A: Para actualizar Gacha Nox a la última versión, es necesario descargar e instalar el nuevo archivo APK desde el sitio web oficial o una fuente de confianza. Puede consultar el sitio web o las plataformas de medios sociales del modder para cualquier anuncio o noticias sobre nuevas actualizaciones. También debes hacer una copia de seguridad de tus datos antes de actualizar el juego, en caso de que algo salga mal. -Q: ¿Cómo puedo contactar al modder o a la comunidad de Gacha Nox?-A: Para contactar con el modder o la comunidad de Gacha Nox, puede utilizar las siguientes plataformas: -
También puede dejar un comentario en el sitio web o en la página de la tienda de aplicaciones de Gacha Nox. -Q: ¿Cómo puedo apoyar el modder de Gacha Nox?-A: Para soportar el modder de Gacha Nox, puedes hacer las siguientes cosas: -
También puedes agradecer y apreciar el modder por su duro trabajo y dedicación. -Q: ¿Cuáles son algunos otros juegos gacha o mods que puedo jugar?-A: Si te gustan los juegos gacha o mods, puedes probar algunos de estos: -
- - \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Alphazero Chess Engine.md b/spaces/Benson/text-generation/Examples/Descargar Alphazero Chess Engine.md deleted file mode 100644 index eaea4e60110ba33c554c3cc6f8cbfb5a2db641fa..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Alphazero Chess Engine.md +++ /dev/null @@ -1,71 +0,0 @@ - - Cómo descargar el motor de ajedrez AlphaZero-AlphaZero es un programa de computadora desarrollado por DeepMind de Google que logró un nivel sobrehumano de juego en ajedrez, shogi y go. Aprendió los juegos desde cero jugando contra sí mismo, utilizando una red neuronal profunda y el aprendizaje de refuerzo. Derrotó al motor de ajedrez más fuerte del mundo, Stockfish, en un partido de 100 partidas en 2017, mostrando una comprensión notable de los conceptos y estrategias de ajedrez. -descargar alphazero chess engineDownload ★★★ https://bltlly.com/2v6Kt1 - Desafortunadamente, AlphaZero no está disponible para el público, ya que se ejecuta en hardware personalizado y no es lanzado por DeepMind. Sin embargo, hay algunas alternativas que puedes descargar y usar en tu PC, que se basan en las mismas técnicas que AlphaZero. En este artículo, le mostraremos cómo descargar y usar dos de ellos: Leela Chess Zero y AllieStein. -Opción 1: Usar Leela Chess Zero-Leela Chess Zero (LC0) es un proyecto de código abierto que pretende replicar el enfoque de AlphaZero para el ajedrez. Utiliza una red neuronal que se entrena por auto-juego y un algoritmo de búsqueda de árbol de Monte Carlo que guía la búsqueda. Puede jugar a un nivel muy alto, comparable a Stockfish, y tiene un estilo único y creativo. -Cómo instalar Leela Chess Zero en tu PC-Para instalar Leela Chess Zero en tu PC, debes seguir estos pasos: - -
Felicidades, has instalado Leela Chess Zero en tu PC! -Cómo usar Leela Chess Zero como un motor UCI en software de ajedrez- -
Disfruta usando Leela Chess Zero como tu compañero de ajedrez! -Opción 2: Usar AllieStein-AllieStein es otro motor de ajedrez de red neuronal que se basa en técnicas AlphaZero. Está desarrollado por Adam Treat y Mark Jordan, e incorpora algunos conocimientos e innovaciones humanas que no están presentes en el documento original de AlphaZero. También es muy fuerte y ha ganado varios torneos contra otros motores. -Cómo descargar AllieStein desde su sitio web-Para descargar AllieStein desde su sitio web, debe seguir estos pasos: -
Felicidades, ¡has descargado AllieStein de su sitio web! -Cómo usar AllieStein como un motor UCI en software de ajedrez- -Disfruta usando AllieStein como tu compañero de ajedrez! -Conclusión-En este artículo, le hemos mostrado cómo descargar y usar dos alternativas al motor de ajedrez AlphaZero: Leela Chess Zero y AllieStein. Ambos se basan en las mismas técnicas que AlphaZero, como las redes neuronales y el aprendizaje de refuerzo, y pueden jugar a un nivel muy alto, comparable a Stockfish. También tienen estilos únicos y creativos que pueden ayudarle a mejorar su comprensión y habilidades de ajedrez. -Si está interesado en probar estos motores, puede seguir los pasos que hemos proporcionado e instalarlos en su PC. A continuación, puede utilizarlos como motores UCI en su software de ajedrez y empezar a analizar o jugar. Usted se sorprenderá por su fuerza y belleza! -Preguntas frecuentes-Q: ¿Es AlphaZero mejor que Stockfish?-A: Según los resultados del partido de 2017, AlphaZero derrotó a Stockfish por una puntuación de 64-36, con 28 victorias, 72 empates y ninguna pérdida. Sin embargo, algunos factores pueden haber influido en el resultado, como el control de tiempo, el hardware o la versión de Stockfish utilizada. Por lo tanto, es difícil decir con seguridad cuál es mejor. -Q: ¿Cómo puedo jugar contra AlphaZero online?-A: Desafortunadamente, no puedes jugar contra AlphaZero en línea, ya que no está disponible para el público. Sin embargo, puedes jugar contra algunas de sus alternativas, como Leela Chess Zero o AllieStein, en algunos sitios web o aplicaciones que los soportan. Por ejemplo, puedes probar este sitio web o Fat Fritz 2: Un motor comercial desarrollado por ChessBase que utiliza una versión modificada de la búsqueda de Stockfish y una gran red neuronal entrenada en juegos humanos y de computadora. - P: ¿Cuáles son algunos de los beneficios de usar motores de ajedrez de red neuronal?-A: Algunos beneficios A: Algunos beneficios de usar motores de ajedrez de redes neuronales son: -
Espero que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! 64aa2da5cf- - \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py deleted file mode 100644 index 719d69dd801b78b360c6c2234080eee638b8de82..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_editable.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging -import os -from typing import Optional - -from pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing - -from pip._internal.utils.subprocess import runner_with_spinner_message - -logger = logging.getLogger(__name__) - - -def build_wheel_editable( - name: str, - backend: BuildBackendHookCaller, - metadata_directory: str, - tempd: str, -) -> Optional[str]: - """Build one InstallRequirement using the PEP 660 build process. - - Returns path to wheel if successfully built. Otherwise, returns None. - """ - assert metadata_directory is not None - try: - logger.debug("Destination directory: %s", tempd) - - runner = runner_with_spinner_message( - f"Building editable for {name} (pyproject.toml)" - ) - with backend.subprocess_runner(runner): - try: - wheel_name = backend.build_editable( - tempd, - metadata_directory=metadata_directory, - ) - except HookMissing as e: - logger.error( - "Cannot build editable %s because the build " - "backend does not have the %s hook", - name, - e, - ) - return None - except Exception: - logger.error("Failed building editable for %s", name) - return None - return os.path.join(tempd, wheel_name) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_stack.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_stack.py deleted file mode 100644 index 194564e761ddae165b39ef6598877e2e3820af0a..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_stack.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import List, TypeVar - -T = TypeVar("T") - - -class Stack(List[T]): - """A small shim over builtin list.""" - - @property - def top(self) -> T: - """Get top of stack.""" - return self[-1] - - def push(self, item: T) -> None: - """Push an item on to the stack (append in stack nomenclature).""" - self.append(item) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/layout.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/layout.py deleted file mode 100644 index 849356ea9a03a031abce367b955a30fce26c9845..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/layout.py +++ /dev/null @@ -1,443 +0,0 @@ -from abc import ABC, abstractmethod -from itertools import islice -from operator import itemgetter -from threading import RLock -from typing import ( - TYPE_CHECKING, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Sequence, - Tuple, - Union, -) - -from ._ratio import ratio_resolve -from .align import Align -from .console import Console, ConsoleOptions, RenderableType, RenderResult -from .highlighter import ReprHighlighter -from .panel import Panel -from .pretty import Pretty -from .region import Region -from .repr import Result, rich_repr -from .segment import Segment -from .style import StyleType - -if TYPE_CHECKING: - from pip._vendor.rich.tree import Tree - - -class LayoutRender(NamedTuple): - """An individual layout render.""" - - region: Region - render: List[List[Segment]] - - -RegionMap = Dict["Layout", Region] -RenderMap = Dict["Layout", LayoutRender] - - -class LayoutError(Exception): - """Layout related error.""" - - -class NoSplitter(LayoutError): - """Requested splitter does not exist.""" - - -class _Placeholder: - """An internal renderable used as a Layout placeholder.""" - - highlighter = ReprHighlighter() - - def __init__(self, layout: "Layout", style: StyleType = "") -> None: - self.layout = layout - self.style = style - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - width = options.max_width - height = options.height or options.size.height - layout = self.layout - title = ( - f"{layout.name!r} ({width} x {height})" - if layout.name - else f"({width} x {height})" - ) - yield Panel( - Align.center(Pretty(layout), vertical="middle"), - style=self.style, - title=self.highlighter(title), - border_style="blue", - height=height, - ) - - -class Splitter(ABC): - """Base class for a splitter.""" - - name: str = "" - - @abstractmethod - def get_tree_icon(self) -> str: - """Get the icon (emoji) used in layout.tree""" - - @abstractmethod - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - """Divide a region amongst several child layouts. - - Args: - children (Sequence(Layout)): A number of child layouts. - region (Region): A rectangular region to divide. - """ - - -class RowSplitter(Splitter): - """Split a layout region in to rows.""" - - name = "row" - - def get_tree_icon(self) -> str: - return "[layout.tree.row]⬌" - - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - x, y, width, height = region - render_widths = ratio_resolve(width, children) - offset = 0 - _Region = Region - for child, child_width in zip(children, render_widths): - yield child, _Region(x + offset, y, child_width, height) - offset += child_width - - -class ColumnSplitter(Splitter): - """Split a layout region in to columns.""" - - name = "column" - - def get_tree_icon(self) -> str: - return "[layout.tree.column]⬍" - - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - x, y, width, height = region - render_heights = ratio_resolve(height, children) - offset = 0 - _Region = Region - for child, child_height in zip(children, render_heights): - yield child, _Region(x, y + offset, width, child_height) - offset += child_height - - -@rich_repr -class Layout: - """A renderable to divide a fixed height in to rows or columns. - - Args: - renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None. - name (str, optional): Optional identifier for Layout. Defaults to None. - size (int, optional): Optional fixed size of layout. Defaults to None. - minimum_size (int, optional): Minimum size of layout. Defaults to 1. - ratio (int, optional): Optional ratio for flexible layout. Defaults to 1. - visible (bool, optional): Visibility of layout. Defaults to True. - """ - - splitters = {"row": RowSplitter, "column": ColumnSplitter} - - def __init__( - self, - renderable: Optional[RenderableType] = None, - *, - name: Optional[str] = None, - size: Optional[int] = None, - minimum_size: int = 1, - ratio: int = 1, - visible: bool = True, - ) -> None: - self._renderable = renderable or _Placeholder(self) - self.size = size - self.minimum_size = minimum_size - self.ratio = ratio - self.name = name - self.visible = visible - self.splitter: Splitter = self.splitters["column"]() - self._children: List[Layout] = [] - self._render_map: RenderMap = {} - self._lock = RLock() - - def __rich_repr__(self) -> Result: - yield "name", self.name, None - yield "size", self.size, None - yield "minimum_size", self.minimum_size, 1 - yield "ratio", self.ratio, 1 - - @property - def renderable(self) -> RenderableType: - """Layout renderable.""" - return self if self._children else self._renderable - - @property - def children(self) -> List["Layout"]: - """Gets (visible) layout children.""" - return [child for child in self._children if child.visible] - - @property - def map(self) -> RenderMap: - """Get a map of the last render.""" - return self._render_map - - def get(self, name: str) -> Optional["Layout"]: - """Get a named layout, or None if it doesn't exist. - - Args: - name (str): Name of layout. - - Returns: - Optional[Layout]: Layout instance or None if no layout was found. - """ - if self.name == name: - return self - else: - for child in self._children: - named_layout = child.get(name) - if named_layout is not None: - return named_layout - return None - - def __getitem__(self, name: str) -> "Layout": - layout = self.get(name) - if layout is None: - raise KeyError(f"No layout with name {name!r}") - return layout - - @property - def tree(self) -> "Tree": - """Get a tree renderable to show layout structure.""" - from pip._vendor.rich.styled import Styled - from pip._vendor.rich.table import Table - from pip._vendor.rich.tree import Tree - - def summary(layout: "Layout") -> Table: - - icon = layout.splitter.get_tree_icon() - - table = Table.grid(padding=(0, 1, 0, 0)) - - text: RenderableType = ( - Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim") - ) - table.add_row(icon, text) - _summary = table - return _summary - - layout = self - tree = Tree( - summary(layout), - guide_style=f"layout.tree.{layout.splitter.name}", - highlight=True, - ) - - def recurse(tree: "Tree", layout: "Layout") -> None: - for child in layout._children: - recurse( - tree.add( - summary(child), - guide_style=f"layout.tree.{child.splitter.name}", - ), - child, - ) - - recurse(tree, self) - return tree - - def split( - self, - *layouts: Union["Layout", RenderableType], - splitter: Union[Splitter, str] = "column", - ) -> None: - """Split the layout in to multiple sub-layouts. - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - splitter (Union[Splitter, str]): Splitter instance or name of splitter. - """ - _layouts = [ - layout if isinstance(layout, Layout) else Layout(layout) - for layout in layouts - ] - try: - self.splitter = ( - splitter - if isinstance(splitter, Splitter) - else self.splitters[splitter]() - ) - except KeyError: - raise NoSplitter(f"No splitter called {splitter!r}") - self._children[:] = _layouts - - def add_split(self, *layouts: Union["Layout", RenderableType]) -> None: - """Add a new layout(s) to existing split. - - Args: - *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances. - - """ - _layouts = ( - layout if isinstance(layout, Layout) else Layout(layout) - for layout in layouts - ) - self._children.extend(_layouts) - - def split_row(self, *layouts: Union["Layout", RenderableType]) -> None: - """Split the layout in to a row (layouts side by side). - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - """ - self.split(*layouts, splitter="row") - - def split_column(self, *layouts: Union["Layout", RenderableType]) -> None: - """Split the layout in to a column (layouts stacked on top of each other). - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - """ - self.split(*layouts, splitter="column") - - def unsplit(self) -> None: - """Reset splits to initial state.""" - del self._children[:] - - def update(self, renderable: RenderableType) -> None: - """Update renderable. - - Args: - renderable (RenderableType): New renderable object. - """ - with self._lock: - self._renderable = renderable - - def refresh_screen(self, console: "Console", layout_name: str) -> None: - """Refresh a sub-layout. - - Args: - console (Console): Console instance where Layout is to be rendered. - layout_name (str): Name of layout. - """ - with self._lock: - layout = self[layout_name] - region, _lines = self._render_map[layout] - (x, y, width, height) = region - lines = console.render_lines( - layout, console.options.update_dimensions(width, height) - ) - self._render_map[layout] = LayoutRender(region, lines) - console.update_screen_lines(lines, x, y) - - def _make_region_map(self, width: int, height: int) -> RegionMap: - """Create a dict that maps layout on to Region.""" - stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))] - push = stack.append - pop = stack.pop - layout_regions: List[Tuple[Layout, Region]] = [] - append_layout_region = layout_regions.append - while stack: - append_layout_region(pop()) - layout, region = layout_regions[-1] - children = layout.children - if children: - for child_and_region in layout.splitter.divide(children, region): - push(child_and_region) - - region_map = { - layout: region - for layout, region in sorted(layout_regions, key=itemgetter(1)) - } - return region_map - - def render(self, console: Console, options: ConsoleOptions) -> RenderMap: - """Render the sub_layouts. - - Args: - console (Console): Console instance. - options (ConsoleOptions): Console options. - - Returns: - RenderMap: A dict that maps Layout on to a tuple of Region, lines - """ - render_width = options.max_width - render_height = options.height or console.height - region_map = self._make_region_map(render_width, render_height) - layout_regions = [ - (layout, region) - for layout, region in region_map.items() - if not layout.children - ] - render_map: Dict["Layout", "LayoutRender"] = {} - render_lines = console.render_lines - update_dimensions = options.update_dimensions - - for layout, region in layout_regions: - lines = render_lines( - layout.renderable, update_dimensions(region.width, region.height) - ) - render_map[layout] = LayoutRender(region, lines) - return render_map - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - with self._lock: - width = options.max_width or console.width - height = options.height or console.height - render_map = self.render(console, options.update_dimensions(width, height)) - self._render_map = render_map - layout_lines: List[List[Segment]] = [[] for _ in range(height)] - _islice = islice - for (region, lines) in render_map.values(): - _x, y, _layout_width, layout_height = region - for row, line in zip( - _islice(layout_lines, y, y + layout_height), lines - ): - row.extend(line) - - new_line = Segment.line() - for layout_row in layout_lines: - yield from layout_row - yield new_line - - -if __name__ == "__main__": - from pip._vendor.rich.console import Console - - console = Console() - layout = Layout() - - layout.split_column( - Layout(name="header", size=3), - Layout(ratio=1, name="main"), - Layout(size=10, name="footer"), - ) - - layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2)) - - layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2")) - - layout["s2"].split_column( - Layout(name="top"), Layout(name="middle"), Layout(name="bottom") - ) - - layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2")) - - layout["content"].update("foo") - - console.print(layout) diff --git a/spaces/Bishnupada/Fine-tuning-using-Hugging-face-transformers/README.md b/spaces/Bishnupada/Fine-tuning-using-Hugging-face-transformers/README.md deleted file mode 100644 index bf3e6089653060c86e097f933b2c357f69ea9b4c..0000000000000000000000000000000000000000 --- a/spaces/Bishnupada/Fine-tuning-using-Hugging-face-transformers/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fine Tuning Using Hugging Face Transformers -emoji: 🔥 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/BlinkDL/ChatRWKV-gradio/README.md b/spaces/BlinkDL/ChatRWKV-gradio/README.md deleted file mode 100644 index b066e74bd4dd2cd71e82d13ec94a315694ba18e2..0000000000000000000000000000000000000000 --- a/spaces/BlinkDL/ChatRWKV-gradio/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatRWKV -emoji: 💻 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/BrunoBall/Kaludi-ARTificialJourney-v1.0-768/app.py b/spaces/BrunoBall/Kaludi-ARTificialJourney-v1.0-768/app.py deleted file mode 100644 index bad2d51fb69780eb9095ba47291985b7f517b836..0000000000000000000000000000000000000000 --- a/spaces/BrunoBall/Kaludi-ARTificialJourney-v1.0-768/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Kaludi/ARTificialJourney-v1.0-768").launch() \ No newline at end of file diff --git a/spaces/CVPR/CVPR2022_papers/app.py b/spaces/CVPR/CVPR2022_papers/app.py deleted file mode 100644 index 444d2982186e990f741f32cf0b3901ffe6cd36e4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/CVPR2022_papers/app.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import gradio as gr - -from paper_list import PaperList - -DESCRIPTION = '# CVPR 2022 Papers' -NOTES = ''' -- [CVPR 2022](https://cvpr2022.thecvf.com/) -- [Proceedings](https://openaccess.thecvf.com/CVPR2022) -''' - -paper_list = PaperList() - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - search_box = gr.Textbox( - label='Search Title', - placeholder= - 'You can search for titles with regular expressions. e.g. (? stride 2 max pool - - -class ResNet(Backbone): - def __init__(self, stem, stages, num_classes=None, out_features=None): - """ - Args: - stem (nn.Module): a stem module - stages (list[list[ResNetBlock]]): several (typically 4) stages, - each contains multiple :class:`ResNetBlockBase`. - num_classes (None or int): if None, will not perform classification. - out_features (list[str]): name of the layers whose outputs should - be returned in forward. Can be anything in "stem", "linear", or "res2" ... - If None, will return the output of the last layer. - """ - super(ResNet, self).__init__() - self.stem = stem - self.num_classes = num_classes - - current_stride = self.stem.stride - self._out_feature_strides = {"stem": current_stride} - self._out_feature_channels = {"stem": self.stem.out_channels} - - self.stages_and_names = [] - for i, blocks in enumerate(stages): - for block in blocks: - assert isinstance(block, ResNetBlockBase), block - curr_channels = block.out_channels - stage = nn.Sequential(*blocks) - name = "res" + str(i + 2) - self.add_module(name, stage) - self.stages_and_names.append((stage, name)) - self._out_feature_strides[name] = current_stride = int( - current_stride * np.prod([k.stride for k in blocks]) - ) - self._out_feature_channels[name] = blocks[-1].out_channels - - if num_classes is not None: - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.linear = nn.Linear(curr_channels, num_classes) - - # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": - # "The 1000-way fully-connected layer is initialized by - # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." - nn.init.normal_(self.linear.weight, std=0.01) - name = "linear" - - if out_features is None: - out_features = [name] - self._out_features = out_features - assert len(self._out_features) - children = [x[0] for x in self.named_children()] - for out_feature in self._out_features: - assert out_feature in children, "Available children: {}".format(", ".join(children)) - - def forward(self, x): - outputs = {} - x = self.stem(x) - if "stem" in self._out_features: - outputs["stem"] = x - for stage, name in self.stages_and_names: - x = stage(x) - if name in self._out_features: - outputs[name] = x - if self.num_classes is not None: - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.linear(x) - if "linear" in self._out_features: - outputs["linear"] = x - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - -@BACKBONE_REGISTRY.register() -def build_resnet_backbone(cfg, input_shape): - """ - Create a ResNet instance from config. - - Returns: - ResNet: a :class:`ResNet` instance. - """ - # need registration of new blocks/stems? - norm = cfg.MODEL.RESNETS.NORM - stem = BasicStem( - in_channels=input_shape.channels, - out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, - norm=norm, - ) - freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT - - if freeze_at >= 1: - for p in stem.parameters(): - p.requires_grad = False - stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) - - # fmt: off - out_features = cfg.MODEL.RESNETS.OUT_FEATURES - depth = cfg.MODEL.RESNETS.DEPTH - num_groups = cfg.MODEL.RESNETS.NUM_GROUPS - width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group - in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS - out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS - stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 - res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION - deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE - deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED - deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS - # fmt: on - assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) - - num_blocks_per_stage = { - 18: [2, 2, 2, 2], - 34: [3, 4, 6, 3], - 50: [3, 4, 6, 3], - 101: [3, 4, 23, 3], - 152: [3, 8, 36, 3], - }[depth] - - if depth in [18, 34]: - assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" - assert not any( - deform_on_per_stage - ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" - assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" - assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" - - stages = [] - - # Avoid creating variables without gradients - # It consumes extra memory and may cause allreduce to fail - out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] - max_stage_idx = max(out_stage_idx) - for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): - dilation = res5_dilation if stage_idx == 5 else 1 - first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 - stage_kargs = { - "num_blocks": num_blocks_per_stage[idx], - "first_stride": first_stride, - "in_channels": in_channels, - "out_channels": out_channels, - "norm": norm, - } - # Use BasicBlock for R18 and R34. - if depth in [18, 34]: - stage_kargs["block_class"] = BasicBlock - else: - stage_kargs["bottleneck_channels"] = bottleneck_channels - stage_kargs["stride_in_1x1"] = stride_in_1x1 - stage_kargs["dilation"] = dilation - stage_kargs["num_groups"] = num_groups - if deform_on_per_stage[idx]: - stage_kargs["block_class"] = DeformBottleneckBlock - stage_kargs["deform_modulated"] = deform_modulated - stage_kargs["deform_num_groups"] = deform_num_groups - else: - stage_kargs["block_class"] = BottleneckBlock - blocks = make_stage(**stage_kargs) - in_channels = out_channels - out_channels *= 2 - bottleneck_channels *= 2 - - if freeze_at >= stage_idx: - for block in blocks: - block.freeze() - stages.append(blocks) - return ResNet(stem, stages, out_features=out_features) diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_assign.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_assign.h deleted file mode 100644 index 4e7f2cf20bedd44001611b62ce498ea9687dd7db..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_assign.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include
-
-"""
-
-
-prompt_hints = """
-Chat about Dialogues • Games • AI • AI Regulation-Chat is built from:
-
-"""
-
-# from index import PERSIST_DIRECTORY, CalendarIndex
-PERSIST_DIRECTORY = "chromadb"
-# Create embeddings
-
-# # create memory object
-from langchain.memory import ConversationBufferMemory
-memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-
-def loading_pdf():
- return "Loading..."
-
-def loading_database(open_ai_key):
- if open_ai_key is not None:
- if os.path.exists(PERSIST_DIRECTORY):
- embeddings = OpenAIEmbeddings(openai_api_key=open_ai_key)
- docs_retriever = Chroma(persist_directory=PERSIST_DIRECTORY, embedding_function=embeddings)
-
- global qa_chain
- qa_chain = ConversationalRetrievalChain.from_llm(ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=open_ai_key),
- retriever=docs_retriever.as_retriever(),
- memory=memory,
- return_source_documents=False
- )
- return "Ready"
- else:
- return "You forgot OpenAI API key"
-
-def add_text(history, text):
- history = history + [(text, None)]
- return history, ""
-
-
-def bot(history):
- response = infer(history[-1][0], history)
- history[-1][1] = ""
- for character in response:
- history[-1][1] += character
- time.sleep(0.05)
- yield history
-
-
-def infer(question, history):
- res = []
- for human, ai in history[:-1]:
- pair = (human, ai)
- res.append(pair)
-
- chat_history = res
- query = question
- result = qa_chain({"question": query, "chat_history": chat_history})
- return result["answer"]
-
-def update_message(question_component, chat_prompts):
- question_component.value = chat_prompts.get_name()
- return None
-
-with gr.Blocks(css=css) as demo:
- with gr.Column(elem_id="col-container"):
- gr.HTML(title)
- with gr.Column():
- with gr.Row():
- openai_key = gr.Textbox(label="OpenAI API key", type="password")
- submit_api_key = gr.Button("Submit")
- with gr.Row():
- langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
-
- chatbot = gr.Chatbot([], elem_id="chatbot").style(height=350)
- question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
- submit_btn = gr.Button("Send Message")
- gr.HTML(prompt_hints)
-
- submit_api_key.click(loading_database, inputs=[openai_key], outputs=[langchain_status], queue=False)
- # demo.load(loading_database, None, langchain_status)
- question.submit(add_text, [chatbot, question], [chatbot, question]).then(
- bot, chatbot, chatbot
- )
- submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
- bot, chatbot, chatbot)
-
-demo.queue(concurrency_count=2, max_size=20).launch()
\ No newline at end of file
diff --git a/spaces/DonDoesStuff/orca-mini-3b-chat/README.md b/spaces/DonDoesStuff/orca-mini-3b-chat/README.md
deleted file mode 100644
index a962aeb5c85575c07ad726496e168af5ed77079e..0000000000000000000000000000000000000000
--- a/spaces/DonDoesStuff/orca-mini-3b-chat/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Orca Mini 3b Chat
-emoji: ⚡
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/hyperparameters.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/hyperparameters.py
deleted file mode 100644
index ca3a22302a7c5b31a6aa15492a860aa367776e4b..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/pti_configs/hyperparameters.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Architechture
-lpips_type = 'alex'
-first_inv_type = 'w+' # 'w+'
-optim_type = 'adam'
-
-# Locality regularization
-latent_ball_num_of_samples = 1
-locality_regularization_interval = 1
-use_locality_regularization = False
-regulizer_l2_lambda = 0.1
-regulizer_lpips_lambda = 0.1
-regulizer_alpha = 30
-
-# Loss
-pt_l2_lambda = 1
-pt_lpips_lambda = 1
-
-# Steps
-LPIPS_value_threshold = 0.04
-max_pti_steps = 350
-first_inv_steps = 450
-max_images_to_invert = 30
-
-# Optimization
-pti_learning_rate = 5e-4
-first_inv_lr = 8e-3
-train_batch_size = 1
-use_last_w_pivots = False
diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/vg_detection.py b/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/vg_detection.py
deleted file mode 100644
index d826ecca5ea9c9bfbaf08366b5b2a468c908363b..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/PSG/OpenPSG/configs/_base_/datasets/vg_detection.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# dataset settings
-custom_imports = dict(imports=[
- 'openpsg.datasets',
- 'openpsg.datasets.pipelines',
-],
- allow_failed_imports=False)
-
-dataset_type = 'SceneGraphDataset'
-ann_file = 'data/vg/data_openpsg.json'
-img_dir = 'data/vg/VG_100K'
-
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadSceneGraphAnnotations', with_bbox=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(type=dataset_type,
- ann_file=ann_file,
- img_prefix=img_dir,
- pipeline=train_pipeline,
- split='train'),
- val=dict(type=dataset_type,
- ann_file=ann_file,
- img_prefix=img_dir,
- pipeline=test_pipeline,
- split='test'),
- test=dict(type=dataset_type,
- ann_file=ann_file,
- img_prefix=img_dir,
- pipeline=test_pipeline,
- split='test'))
-evaluation = dict(interval=1, metric='bbox')
diff --git a/spaces/ECCV2022/PSG/OpenPSG/configs/imp/panoptic_fpn_r50_fpn_1x_sgdet_psg.py b/spaces/ECCV2022/PSG/OpenPSG/configs/imp/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
deleted file mode 100644
index 1ec83492bfccc1b706723b6de680392f9b0e2c7a..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/PSG/OpenPSG/configs/imp/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
+++ /dev/null
@@ -1,48 +0,0 @@
-_base_ = [
- '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
-]
-
-model = dict(relation_head=dict(
- type='IMPHead',
- head_config=dict(
- # NOTE: Evaluation type
- use_gt_box=False,
- use_gt_label=False,
- num_iter=2,
- ),
-))
-
-evaluation = dict(
- interval=1,
- metric='sgdet',
- relation_mode=True,
- classwise=True,
- iou_thrs=0.5,
- detection_method='pan_seg',
-)
-
-# Change batch size and learning rate
-data = dict(samples_per_gpu=16, )
-# workers_per_gpu=0) # FIXME: Is this the problem?
-optimizer = dict(type='SGD', lr=0.001, momentum=0.9)
-
-# Log config
-project_name = 'openpsg'
-expt_name = 'imp_panoptic_fpn_r50_fpn_1x_sgdet_psg'
-work_dir = f'./work_dirs/{expt_name}'
-
-log_config = dict(
- interval=50,
- hooks=[
- dict(type='TextLoggerHook'),
- # dict(type='TensorboardLoggerHook')
- dict(
- type='WandbLoggerHook',
- init_kwargs=dict(
- project=project_name,
- name=expt_name,
- # config=work_dir + "/cfg.yaml"
- ),
- ),
- ],
-)
diff --git a/spaces/Ekimetrics/Biomap/biomap/dino/vision_transformer.py b/spaces/Ekimetrics/Biomap/biomap/dino/vision_transformer.py
deleted file mode 100644
index 029d66600e272904ce32b9d09faf4ea0a68016b5..0000000000000000000000000000000000000000
--- a/spaces/Ekimetrics/Biomap/biomap/dino/vision_transformer.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Mostly copy-paste from timm library.
-https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
-"""
-import math
-from functools import partial
-
-import torch
-import torch.nn as nn
-from dino.utils import trunc_normal_
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
- if drop_prob == 0. or not training:
- return x
- keep_prob = 1 - drop_prob
- shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
- random_tensor.floor_() # binarize
- output = x.div(keep_prob) * random_tensor
- return output
-
-
-class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- """
- def __init__(self, drop_prob=None):
- super(DropPath, self).__init__()
- self.drop_prob = drop_prob
-
- def forward(self, x):
- return drop_path(x, self.drop_prob, self.training)
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class Attention(nn.Module):
- def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- def forward(self, x, return_qkv=False):
- B, N, C = x.shape
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2]
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x,attn, qkv
-
-
-
-class Block(nn.Module):
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x, return_attention=False, return_qkv = False):
- y, attn, qkv = self.attn(self.norm1(x))
- if return_attention:
- return attn
- x = x + self.drop_path(y)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- if return_qkv:
- return x,attn, qkv
- return x
-
-
-class PatchEmbed(nn.Module):
- """ Image to Patch Embedding
- """
- def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
- super().__init__()
- num_patches = (img_size // patch_size) * (img_size // patch_size)
- self.img_size = img_size
- self.patch_size = patch_size
- self.num_patches = num_patches
-
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-
- def forward(self, x):
- B, C, H, W = x.shape
- x = self.proj(x).flatten(2).transpose(1, 2)
- return x
-
-
-class VisionTransformer(nn.Module):
- """ Vision Transformer """
- def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
- num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
- drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
- super().__init__()
-
- self.num_features = self.embed_dim = embed_dim
-
- self.patch_embed = PatchEmbed(
- img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
- num_patches = self.patch_embed.num_patches
-
- self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
- self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
- self.blocks = nn.ModuleList([
- Block(
- dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
- for i in range(depth)])
- self.norm = norm_layer(embed_dim)
-
- # Classifier head
- self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
-
- trunc_normal_(self.pos_embed, std=.02)
- trunc_normal_(self.cls_token, std=.02)
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- def interpolate_pos_encoding(self, x, w, h):
- npatch = x.shape[1] - 1
- N = self.pos_embed.shape[1] - 1
- if npatch == N and w == h:
- return self.pos_embed
- class_pos_embed = self.pos_embed[:, 0]
- patch_pos_embed = self.pos_embed[:, 1:]
- dim = x.shape[-1]
- w0 = w // self.patch_embed.patch_size
- h0 = h // self.patch_embed.patch_size
- # we add a small number to avoid floating point error in the interpolation
- # see discussion at https://github.com/facebookresearch/dino/issues/8
- w0, h0 = w0 + 0.1, h0 + 0.1
- patch_pos_embed = nn.functional.interpolate(
- patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
- scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
- mode='bicubic',
- )
- assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
- patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
- return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
-
- def prepare_tokens(self, x):
- B, nc, w, h = x.shape
- x = self.patch_embed(x) # patch linear embedding
-
- # add the [CLS] token to the embed patch tokens
- cls_tokens = self.cls_token.expand(B, -1, -1)
- x = torch.cat((cls_tokens, x), dim=1)
-
- # add positional encoding to each token
- x = x + self.interpolate_pos_encoding(x, w, h)
-
- return self.pos_drop(x)
-
- def forward(self, x):
- x = self.prepare_tokens(x)
- for blk in self.blocks:
- x = blk(x)
- x = self.norm(x)
- return x[:, 0]
-
- def forward_feats(self, x):
- x = self.prepare_tokens(x)
- for blk in self.blocks:
- x = blk(x)
- x = self.norm(x)
- return x
-
- def get_intermediate_feat(self, x, n=1):
- x = self.prepare_tokens(x)
- # we return the output tokens from the `n` last blocks
- feat = []
- attns = []
- qkvs = []
- for i, blk in enumerate(self.blocks):
- x,attn,qkv = blk(x, return_qkv=True)
- if len(self.blocks) - i <= n:
- feat.append(self.norm(x))
- qkvs.append(qkv)
- attns.append(attn)
- return feat, attns, qkvs
-
- def get_last_selfattention(self, x):
- x = self.prepare_tokens(x)
- for i, blk in enumerate(self.blocks):
- if i < len(self.blocks) - 1:
- x = blk(x)
- else:
- # return attention of the last block
- return blk(x, return_attention=True)
-
- def get_intermediate_layers(self, x, n=1):
- x = self.prepare_tokens(x)
- # we return the output tokens from the `n` last blocks
- output = []
- for i, blk in enumerate(self.blocks):
- x = blk(x)
- if len(self.blocks) - i <= n:
- output.append(self.norm(x))
- return output
-
-
-def vit_tiny(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-def vit_small(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-def vit_base(patch_size=16, **kwargs):
- model = VisionTransformer(
- patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
- qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
- return model
-
-
-class DINOHead(nn.Module):
- def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
- super().__init__()
- nlayers = max(nlayers, 1)
- if nlayers == 1:
- self.mlp = nn.Linear(in_dim, bottleneck_dim)
- else:
- layers = [nn.Linear(in_dim, hidden_dim)]
- if use_bn:
- layers.append(nn.BatchNorm1d(hidden_dim))
- layers.append(nn.GELU())
- for _ in range(nlayers - 2):
- layers.append(nn.Linear(hidden_dim, hidden_dim))
- if use_bn:
- layers.append(nn.BatchNorm1d(hidden_dim))
- layers.append(nn.GELU())
- layers.append(nn.Linear(hidden_dim, bottleneck_dim))
- self.mlp = nn.Sequential(*layers)
- self.apply(self._init_weights)
- self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
- self.last_layer.weight_g.data.fill_(1)
- if norm_last_layer:
- self.last_layer.weight_g.requires_grad = False
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x = self.mlp(x)
- x = nn.functional.normalize(x, dim=-1, p=2)
- x = self.last_layer(x)
- return x
diff --git a/spaces/FaceOnLive/Face-Liveness-Detection-SDK/app.py b/spaces/FaceOnLive/Face-Liveness-Detection-SDK/app.py
deleted file mode 100644
index 9f3ba5ef13d58bd0a540afaab48d32c73a71367c..0000000000000000000000000000000000000000
--- a/spaces/FaceOnLive/Face-Liveness-Detection-SDK/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import sys
-sys.path.append('.')
-
-from flask import Flask, request, jsonify
-from time import gmtime, strftime
-import os
-import base64
-import json
-import cv2
-import numpy as np
-
-from facewrapper.facewrapper import ttv_version
-from facewrapper.facewrapper import ttv_get_hwid
-from facewrapper.facewrapper import ttv_init
-from facewrapper.facewrapper import ttv_init_offline
-from facewrapper.facewrapper import ttv_detect_face
-
-app = Flask(__name__)
-
-app.config['SITE'] = "http://0.0.0.0:8000/"
-app.config['DEBUG'] = False
-
-licenseKey = os.environ.get("LICENSE_KEY")
-licensePath = "license.txt"
-modelFolder = os.path.abspath(os.path.dirname(__file__)) + '/facewrapper/dict'
-
-version = ttv_version()
-print("version: ", version.decode('utf-8'))
-
-ret = ttv_init(modelFolder.encode('utf-8'), licenseKey.encode('utf-8'))
-if ret != 0:
- print(f"online init failed: {ret}");
-
- hwid = ttv_get_hwid()
- print("hwid: ", hwid.decode('utf-8'))
-
- ret = ttv_init_offline(modelFolder.encode('utf-8'), licensePath.encode('utf-8'))
- if ret != 0:
- print(f"offline init failed: {ret}")
- exit(-1)
- else:
- print(f"offline init ok")
-
-else:
- print(f"online init ok")
-
-@app.route('/api/liveness', methods=['POST'])
-def check_liveness():
- file = request.files['image']
- image = cv2.imdecode(np.fromstring(file.read(), np.uint8), cv2.IMREAD_COLOR)
-
- faceRect = np.zeros([4], dtype=np.int32)
- livenessScore = np.zeros([1], dtype=np.double)
- angles = np.zeros([3], dtype=np.double)
- ret = ttv_detect_face(image, image.shape[1], image.shape[0], faceRect, livenessScore, angles)
- if ret == -1:
- result = "license error!"
- elif ret == -2:
- result = "init error!"
- elif ret == 0:
- result = "no face detected!"
- elif ret > 1:
- result = "multiple face detected!"
- elif faceRect[0] < 0 or faceRect[1] < 0 or faceRect[2] >= image.shape[1] or faceRect[2] >= image.shape[0]:
- result = "faace is in boundary!"
- elif livenessScore[0] > 0.5:
- result = "genuine"
- else:
- result = "spoof"
-
- status = "ok"
- response = jsonify({"status": status, "data": {"result": result, "face_rect": {"x": int(faceRect[0]), "y": int(faceRect[1]), "w": int(faceRect[2] - faceRect[0] + 1), "h" : int(faceRect[3] - faceRect[1] + 1)}, "liveness_score": livenessScore[0],
- "angles": {"yaw": angles[0], "roll": angles[1], "pitch": angles[2]}}})
-
- response.status_code = 200
- response.headers["Content-Type"] = "application/json; charset=utf-8"
- return response
-
-@app.route('/api/liveness_base64', methods=['POST'])
-def check_liveness_base64():
- content = request.get_json()
- imageBase64 = content['image']
- image = cv2.imdecode(np.frombuffer(base64.b64decode(imageBase64), dtype=np.uint8), cv2.IMREAD_COLOR)
-
- faceRect = np.zeros([4], dtype=np.int32)
- livenessScore = np.zeros([1], dtype=np.double)
- angles = np.zeros([3], dtype=np.double)
- ret = ttv_detect_face(image, image.shape[1], image.shape[0], faceRect, livenessScore, angles)
- if ret == -1:
- result = "license error!"
- elif ret == -2:
- result = "init error!"
- elif ret == 0:
- result = "no face detected!"
- elif ret > 1:
- result = "multiple face detected!"
- elif faceRect[0] < 0 or faceRect[1] < 0 or faceRect[2] >= image.shape[1] or faceRect[2] >= image.shape[0]:
- result = "faace is in boundary!"
- elif livenessScore[0] > 0.5:
- result = "genuine"
- else:
- result = "spoof"
-
- status = "ok"
- response = jsonify({"status": status, "data": {"result": result, "face_rect": {"x": int(faceRect[0]), "y": int(faceRect[1]), "w": int(faceRect[2] - faceRect[0] + 1), "h" : int(faceRect[3] - faceRect[1] + 1)}, "liveness_score": livenessScore[0],
- "angles": {"yaw": angles[0], "roll": angles[1], "pitch": angles[2]}}})
-
- response.status_code = 200
- response.headers["Content-Type"] = "application/json; charset=utf-8"
- return response
-
-
-if __name__ == '__main__':
- port = int(os.environ.get("PORT", 8000))
- app.run(host='0.0.0.0', port=port)
diff --git a/spaces/Fazzie/PokemonGAI/README.md b/spaces/Fazzie/PokemonGAI/README.md
deleted file mode 100644
index 6529399fed542420d438917ac10f4bb18da908ed..0000000000000000000000000000000000000000
--- a/spaces/Fazzie/PokemonGAI/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: PokemonGAI
-emoji: 🏢
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/FrankZxShen/vits-fast-finetuning-pcr/attentions.py b/spaces/FrankZxShen/vits-fast-finetuning-pcr/attentions.py
deleted file mode 100644
index 9f92f8ead13bc189c0cb5af261f29a9dc5be71df..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/vits-fast-finetuning-pcr/attentions.py
+++ /dev/null
@@ -1,307 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from modules import LayerNorm
-
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # padd along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- # self.conv_1 = layers.Conv1d(in_channels, filter_channels, kernel_size, r = 4, lora_alpha = 16, lora_dropout = 0.05)
- # self.conv_2 = layers.Conv1d(filter_channels, out_channels, kernel_size, r = 4, lora_alpha = 16, lora_dropout = 0.05)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/Gmq-x/gpt-academic/request_llm/bridge_chatgpt.py b/spaces/Gmq-x/gpt-academic/request_llm/bridge_chatgpt.py
deleted file mode 100644
index 8c915c2a1c8701d08a4cd05f5d0c80683d0cd346..0000000000000000000000000000000000000000
--- a/spaces/Gmq-x/gpt-academic/request_llm/bridge_chatgpt.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
-
-"""
- 该文件中主要包含三个函数
-
- 不具备多线程能力的函数:
- 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
-
- 具备多线程调用能力的函数
- 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑
- 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
-"""
-
-import json
-import time
-import gradio as gr
-import logging
-import traceback
-import requests
-import importlib
-
-# config_private.py放自己的秘密如API和代理网址
-# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key
-proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
- get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
-
-timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
- '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
-
-def get_full_error(chunk, stream_response):
- """
- 获取完整的从Openai返回的报错
- """
- while True:
- try:
- chunk += next(stream_response)
- except:
- break
- return chunk
-
-
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
- """
- 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
- inputs:
- 是本次问询的输入
- sys_prompt:
- 系统静默prompt
- llm_kwargs:
- chatGPT的内部调优参数
- history:
- 是之前的对话列表
- observe_window = None:
- 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
- """
- watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
- headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
- retry = 0
- while True:
- try:
- # make a POST request to the API endpoint, stream=False
- from .bridge_all import model_info
- endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
- response = requests.post(endpoint, headers=headers, proxies=proxies,
- json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
- except requests.exceptions.ReadTimeout as e:
- retry += 1
- traceback.print_exc()
- if retry > MAX_RETRY: raise TimeoutError
- if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-
- stream_response = response.iter_lines()
- result = ''
- while True:
- try: chunk = next(stream_response).decode()
- except StopIteration:
- break
- except requests.exceptions.ConnectionError:
- chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
- if len(chunk)==0: continue
- if not chunk.startswith('data:'):
- error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
- if "reduce the length" in error_msg:
- raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
- else:
- raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
- if ('data: [DONE]' in chunk): break # api2d 正常完成
- json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
- delta = json_data["delta"]
- if len(delta) == 0: break
- if "role" in delta: continue
- if "content" in delta:
- result += delta["content"]
- if not console_slience: print(delta["content"], end='')
- if observe_window is not None:
- # 观测窗,把已经获取的数据显示出去
- if len(observe_window) >= 1: observe_window[0] += delta["content"]
- # 看门狗,如果超过期限没有喂狗,则终止
- if len(observe_window) >= 2:
- if (time.time()-observe_window[1]) > watch_dog_patience:
- raise RuntimeError("用户取消了程序。")
- else: raise RuntimeError("意外Json结构:"+delta)
- if json_data['finish_reason'] == 'length':
- raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
- return result
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
- """
- 发送至chatGPT,流式获取输出。
- 用于基础的对话功能。
- inputs 是本次问询的输入
- top_p, temperature是chatGPT的内部调优参数
- history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
- chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
- additional_fn代表点击的哪个按钮,按钮见functional.py
- """
- if is_any_api_key(inputs):
- chatbot._cookies['api_key'] = inputs
- chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
- yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
- return
- elif not is_any_api_key(chatbot._cookies['api_key']):
- chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
- yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
- return
-
- if additional_fn is not None:
- import core_functional
- importlib.reload(core_functional) # 热更新prompt
- core_functional = core_functional.get_core_functions()
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
- raw_input = inputs
- logging.info(f'[raw_input] {raw_input}')
- chatbot.append((inputs, ""))
- yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
-
- try:
- headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
- except RuntimeError as e:
- chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。")
- yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
- return
-
- history.append(inputs); history.append(" ")
-
- retry = 0
- while True:
- try:
- # make a POST request to the API endpoint, stream=True
- from .bridge_all import model_info
- endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
- response = requests.post(endpoint, headers=headers, proxies=proxies,
- json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
- except:
- retry += 1
- chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
- retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
- yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
- if retry > MAX_RETRY: raise TimeoutError
-
- gpt_replying_buffer = ""
-
- is_head_of_the_stream = True
- if stream:
- stream_response = response.iter_lines()
- while True:
- chunk = next(stream_response)
- # print(chunk.decode()[6:])
- if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
- # 数据流的第一帧不携带content
- is_head_of_the_stream = False; continue
-
- if chunk:
- try:
- chunk_decoded = chunk.decode()
- # 前者API2D的
- if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
- # 判定为数据流的结束,gpt_replying_buffer也写完了
- logging.info(f'[response] {gpt_replying_buffer}')
- break
- # 处理数据流的主体
- chunkjson = json.loads(chunk_decoded[6:])
- status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
- # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
- gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
- history[-1] = gpt_replying_buffer
- chatbot[-1] = (history[-2], history[-1])
- yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
-
- except Exception as e:
- traceback.print_exc()
- yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
- chunk = get_full_error(chunk, stream_response)
- chunk_decoded = chunk.decode()
- error_msg = chunk_decoded
- if "reduce the length" in error_msg:
- chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以请再次尝试.")
- history = [] # 清除历史
- elif "does not exist" in error_msg:
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在,或者您没有获得体验资格.")
- elif "Incorrect API key" in error_msg:
- chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由,拒绝服务.")
- elif "exceeded your current quota" in error_msg:
- chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
- elif "bad forward key" in error_msg:
- chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
- else:
- from toolbox import regular_txt_to_markdown
- tb_str = '```\n' + traceback.format_exc() + '```'
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
- yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
- return
-
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
- """
- 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
- """
- if not is_any_api_key(llm_kwargs['api_key']):
- raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
-
- api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {api_key}"
- }
-
- conversation_cnt = len(history) // 2
-
- messages = [{"role": "system", "content": system_prompt}]
- if conversation_cnt:
- for index in range(0, 2*conversation_cnt, 2):
- what_i_have_asked = {}
- what_i_have_asked["role"] = "user"
- what_i_have_asked["content"] = history[index]
- what_gpt_answer = {}
- what_gpt_answer["role"] = "assistant"
- what_gpt_answer["content"] = history[index+1]
- if what_i_have_asked["content"] != "":
- if what_gpt_answer["content"] == "": continue
- if what_gpt_answer["content"] == timeout_bot_msg: continue
- messages.append(what_i_have_asked)
- messages.append(what_gpt_answer)
- else:
- messages[-1]['content'] = what_gpt_answer['content']
-
- what_i_ask_now = {}
- what_i_ask_now["role"] = "user"
- what_i_ask_now["content"] = inputs
- messages.append(what_i_ask_now)
-
- payload = {
- "model": llm_kwargs['llm_model'].strip('api2d-'),
- "messages": messages,
- "temperature": llm_kwargs['temperature'], # 1.0,
- "top_p": llm_kwargs['top_p'], # 1.0,
- "n": 1,
- "stream": stream,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- }
- try:
- print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
- except:
- print('输入中可能存在乱码。')
- return headers,payload
-
-
diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/setup.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/setup.py
deleted file mode 100644
index 32a4c9c9b72a15b1a4e1ad0cc83308fb9f465426..0000000000000000000000000000000000000000
--- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/setup.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-
-from setuptools import find_packages, setup
-
-import os
-import subprocess
-import time
-
-version_file = "realesrgan/version.py"
-
-
-def readme():
- with open("README.md", encoding="utf-8") as f:
- content = f.read()
- return content
-
-
-def get_git_hash():
- def _minimal_ext_cmd(cmd):
- # construct minimal environment
- env = {}
- for k in ["SYSTEMROOT", "PATH", "HOME"]:
- v = os.environ.get(k)
- if v is not None:
- env[k] = v
- # LANGUAGE is used on win32
- env["LANGUAGE"] = "C"
- env["LANG"] = "C"
- env["LC_ALL"] = "C"
- out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
- return out
-
- try:
- out = _minimal_ext_cmd(["git", "rev-parse", "HEAD"])
- sha = out.strip().decode("ascii")
- except OSError:
- sha = "unknown"
-
- return sha
-
-
-def get_hash():
- if os.path.exists(".git"):
- sha = get_git_hash()[:7]
- else:
- sha = "unknown"
-
- return sha
-
-
-def write_version_py():
- content = """# GENERATED VERSION FILE
-# TIME: {}
-__version__ = '{}'
-__gitsha__ = '{}'
-version_info = ({})
-"""
- sha = get_hash()
- with open("VERSION", "r") as f:
- SHORT_VERSION = f.read().strip()
- VERSION_INFO = ", ".join(
- [x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split(".")]
- )
-
- version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
- with open(version_file, "w") as f:
- f.write(version_file_str)
-
-
-def get_version():
- with open(version_file, "r") as f:
- exec(compile(f.read(), version_file, "exec"))
- return locals()["__version__"]
-
-
-def get_requirements(filename="requirements.txt"):
- here = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(here, filename), "r") as f:
- requires = [line.replace("\n", "") for line in f.readlines()]
- return requires
-
-
-if __name__ == "__main__":
- write_version_py()
- setup(
- name="realesrgan",
- version=get_version(),
- description="Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration",
- long_description=readme(),
- long_description_content_type="text/markdown",
- author="Xintao Wang",
- author_email="xintao.wang@outlook.com",
- keywords="computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan",
- url="https://github.com/xinntao/Real-ESRGAN",
- include_package_data=True,
- packages=find_packages(
- exclude=(
- "options",
- "datasets",
- "experiments",
- "results",
- "tb_logger",
- "wandb",
- )
- ),
- classifiers=[
- "Development Status :: 4 - Beta",
- "License :: OSI Approved :: Apache Software License",
- "Operating System :: OS Independent",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- ],
- license="BSD-3-Clause License",
- setup_requires=["cython", "numpy"],
- install_requires=get_requirements(),
- zip_safe=False,
- )
diff --git a/spaces/Gradio-Blocks/ViTPose/mmdet_configs/README.md b/spaces/Gradio-Blocks/ViTPose/mmdet_configs/README.md
deleted file mode 100644
index b180151a3f1904a7636d0719aad751754dfe4a3b..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/ViTPose/mmdet_configs/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-`configs.tar` is a tarball of https://github.com/open-mmlab/mmdetection/tree/v2.24.1/configs.
-The license file of the mmdetection is also included in this directory.
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py
deleted file mode 100644
index 89f387641207512ae1b1c91ca56965004e5eb868..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py
+++ /dev/null
@@ -1,105 +0,0 @@
-_base_ = [
- '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
-]
-
-# model settings
-model = dict(
- type='CornerNet',
- backbone=dict(
- type='HourglassNet',
- downsample_times=5,
- num_stacks=2,
- stage_channels=[256, 256, 384, 384, 384, 512],
- stage_blocks=[2, 2, 2, 2, 2, 4],
- norm_cfg=dict(type='BN', requires_grad=True)),
- neck=None,
- bbox_head=dict(
- type='CornerHead',
- num_classes=80,
- in_channels=256,
- num_feat_levels=2,
- corner_emb_channels=1,
- loss_heatmap=dict(
- type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
- loss_embedding=dict(
- type='AssociativeEmbeddingLoss',
- pull_weight=0.10,
- push_weight=0.10),
- loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
- # training and testing settings
- train_cfg=None,
- test_cfg=dict(
- corner_topk=100,
- local_maximum_kernel=3,
- distance_threshold=0.5,
- score_thr=0.05,
- max_per_img=100,
- nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
-# data settings
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='PhotoMetricDistortion',
- brightness_delta=32,
- contrast_range=(0.5, 1.5),
- saturation_range=(0.5, 1.5),
- hue_delta=18),
- dict(
- type='RandomCenterCropPad',
- crop_size=(511, 511),
- ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
- test_mode=False,
- test_pad_mode=None,
- **img_norm_cfg),
- dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(
- type='MultiScaleFlipAug',
- scale_factor=1.0,
- flip=True,
- transforms=[
- dict(type='Resize'),
- dict(
- type='RandomCenterCropPad',
- crop_size=None,
- ratios=None,
- border=None,
- test_mode=True,
- test_pad_mode=['logical_or', 127],
- **img_norm_cfg),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(
- type='Collect',
- keys=['img'],
- meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
- 'scale_factor', 'flip', 'img_norm_cfg', 'border')),
- ])
-]
-data = dict(
- samples_per_gpu=5,
- workers_per_gpu=3,
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-# optimizer
-optimizer = dict(type='Adam', lr=0.0005)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=1.0 / 3,
- step=[180])
-runner = dict(type='EpochBasedRunner', max_epochs=210)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/grid_rcnn.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/grid_rcnn.py
deleted file mode 100644
index b6145a1464cd940bd4f98eaa15f6f9ecf6a10a20..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/grid_rcnn.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from ..builder import DETECTORS
-from .two_stage import TwoStageDetector
-
-
-@DETECTORS.register_module()
-class GridRCNN(TwoStageDetector):
- """Grid R-CNN.
-
- This detector is the implementation of:
- - Grid R-CNN (https://arxiv.org/abs/1811.12030)
- - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
- """
-
- def __init__(self,
- backbone,
- rpn_head,
- roi_head,
- train_cfg,
- test_cfg,
- neck=None,
- pretrained=None):
- super(GridRCNN, self).__init__(
- backbone=backbone,
- neck=neck,
- rpn_head=rpn_head,
- roi_head=roi_head,
- train_cfg=train_cfg,
- test_cfg=test_cfg,
- pretrained=pretrained)
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index 0f2e1b6da7e63841f4429b1caed5fbe9d537c4f8..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './dnl_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py
deleted file mode 100644
index 420ca2e42836099213c1f91cb925088cfe7c1269..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/upernet/upernet_r101_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './upernet_r50_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py
deleted file mode 100644
index 88bb10d44026ba9f21756eaea9e550841cd59b9f..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/core/seg/sampler/ohem_pixel_sampler.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-from ..builder import PIXEL_SAMPLERS
-from .base_pixel_sampler import BasePixelSampler
-
-
-@PIXEL_SAMPLERS.register_module()
-class OHEMPixelSampler(BasePixelSampler):
- """Online Hard Example Mining Sampler for segmentation.
-
- Args:
- context (nn.Module): The context of sampler, subclass of
- :obj:`BaseDecodeHead`.
- thresh (float, optional): The threshold for hard example selection.
- Below which, are prediction with low confidence. If not
- specified, the hard examples will be pixels of top ``min_kept``
- loss. Default: None.
- min_kept (int, optional): The minimum number of predictions to keep.
- Default: 100000.
- """
-
- def __init__(self, context, thresh=None, min_kept=100000):
- super(OHEMPixelSampler, self).__init__()
- self.context = context
- assert min_kept > 1
- self.thresh = thresh
- self.min_kept = min_kept
-
- def sample(self, seg_logit, seg_label):
- """Sample pixels that have high loss or with low prediction confidence.
-
- Args:
- seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
- seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)
-
- Returns:
- torch.Tensor: segmentation weight, shape (N, H, W)
- """
- with torch.no_grad():
- assert seg_logit.shape[2:] == seg_label.shape[2:]
- assert seg_label.shape[1] == 1
- seg_label = seg_label.squeeze(1).long()
- batch_kept = self.min_kept * seg_label.size(0)
- valid_mask = seg_label != self.context.ignore_index
- seg_weight = seg_logit.new_zeros(size=seg_label.size())
- valid_seg_weight = seg_weight[valid_mask]
- if self.thresh is not None:
- seg_prob = F.softmax(seg_logit, dim=1)
-
- tmp_seg_label = seg_label.clone().unsqueeze(1)
- tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
- seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
- sort_prob, sort_indices = seg_prob[valid_mask].sort()
-
- if sort_prob.numel() > 0:
- min_threshold = sort_prob[min(batch_kept,
- sort_prob.numel() - 1)]
- else:
- min_threshold = 0.0
- threshold = max(min_threshold, self.thresh)
- valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
- else:
- losses = self.context.loss_decode(
- seg_logit,
- seg_label,
- weight=None,
- ignore_index=self.context.ignore_index,
- reduction_override='none')
- # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa
- _, sort_indices = losses[valid_mask].sort(descending=True)
- valid_seg_weight[sort_indices[:batch_kept]] = 1.
-
- seg_weight[valid_mask] = valid_seg_weight
-
- return seg_weight
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/resnest.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/resnest.py
deleted file mode 100644
index 8931decb876e4d46407fd177a5248fe2554e4062..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/resnest.py
+++ /dev/null
@@ -1,314 +0,0 @@
-import math
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as cp
-from mmcv.cnn import build_conv_layer, build_norm_layer
-
-from ..builder import BACKBONES
-from ..utils import ResLayer
-from .resnet import Bottleneck as _Bottleneck
-from .resnet import ResNetV1d
-
-
-class RSoftmax(nn.Module):
- """Radix Softmax module in ``SplitAttentionConv2d``.
-
- Args:
- radix (int): Radix of input.
- groups (int): Groups of input.
- """
-
- def __init__(self, radix, groups):
- super().__init__()
- self.radix = radix
- self.groups = groups
-
- def forward(self, x):
- batch = x.size(0)
- if self.radix > 1:
- x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
- x = F.softmax(x, dim=1)
- x = x.reshape(batch, -1)
- else:
- x = torch.sigmoid(x)
- return x
-
-
-class SplitAttentionConv2d(nn.Module):
- """Split-Attention Conv2d in ResNeSt.
-
- Args:
- in_channels (int): Same as nn.Conv2d.
- out_channels (int): Same as nn.Conv2d.
- kernel_size (int | tuple[int]): Same as nn.Conv2d.
- stride (int | tuple[int]): Same as nn.Conv2d.
- padding (int | tuple[int]): Same as nn.Conv2d.
- dilation (int | tuple[int]): Same as nn.Conv2d.
- groups (int): Same as nn.Conv2d.
- radix (int): Radix of SpltAtConv2d. Default: 2
- reduction_factor (int): Reduction factor of inter_channels. Default: 4.
- conv_cfg (dict): Config dict for convolution layer. Default: None,
- which means using conv2d.
- norm_cfg (dict): Config dict for normalization layer. Default: None.
- dcn (dict): Config dict for DCN. Default: None.
- """
-
- def __init__(self,
- in_channels,
- channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- radix=2,
- reduction_factor=4,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- dcn=None):
- super(SplitAttentionConv2d, self).__init__()
- inter_channels = max(in_channels * radix // reduction_factor, 32)
- self.radix = radix
- self.groups = groups
- self.channels = channels
- self.with_dcn = dcn is not None
- self.dcn = dcn
- fallback_on_stride = False
- if self.with_dcn:
- fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
- if self.with_dcn and not fallback_on_stride:
- assert conv_cfg is None, 'conv_cfg must be None for DCN'
- conv_cfg = dcn
- self.conv = build_conv_layer(
- conv_cfg,
- in_channels,
- channels * radix,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups * radix,
- bias=False)
- self.norm0_name, norm0 = build_norm_layer(
- norm_cfg, channels * radix, postfix=0)
- self.add_module(self.norm0_name, norm0)
- self.relu = nn.ReLU(inplace=True)
- self.fc1 = build_conv_layer(
- None, channels, inter_channels, 1, groups=self.groups)
- self.norm1_name, norm1 = build_norm_layer(
- norm_cfg, inter_channels, postfix=1)
- self.add_module(self.norm1_name, norm1)
- self.fc2 = build_conv_layer(
- None, inter_channels, channels * radix, 1, groups=self.groups)
- self.rsoftmax = RSoftmax(radix, groups)
-
- @property
- def norm0(self):
- """nn.Module: the normalization layer named "norm0" """
- return getattr(self, self.norm0_name)
-
- @property
- def norm1(self):
- """nn.Module: the normalization layer named "norm1" """
- return getattr(self, self.norm1_name)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.norm0(x)
- x = self.relu(x)
-
- batch, rchannel = x.shape[:2]
- batch = x.size(0)
- if self.radix > 1:
- splits = x.view(batch, self.radix, -1, *x.shape[2:])
- gap = splits.sum(dim=1)
- else:
- gap = x
- gap = F.adaptive_avg_pool2d(gap, 1)
- gap = self.fc1(gap)
-
- gap = self.norm1(gap)
- gap = self.relu(gap)
-
- atten = self.fc2(gap)
- atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
-
- if self.radix > 1:
- attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
- out = torch.sum(attens * splits, dim=1)
- else:
- out = atten * x
- return out.contiguous()
-
-
-class Bottleneck(_Bottleneck):
- """Bottleneck block for ResNeSt.
-
- Args:
- inplane (int): Input planes of this block.
- planes (int): Middle planes of this block.
- groups (int): Groups of conv2.
- width_per_group (int): Width per group of conv2. 64x4d indicates
- ``groups=64, width_per_group=4`` and 32x8d indicates
- ``groups=32, width_per_group=8``.
- radix (int): Radix of SpltAtConv2d. Default: 2
- reduction_factor (int): Reduction factor of inter_channels in
- SplitAttentionConv2d. Default: 4.
- avg_down_stride (bool): Whether to use average pool for stride in
- Bottleneck. Default: True.
- kwargs (dict): Key word arguments for base class.
- """
- expansion = 4
-
- def __init__(self,
- inplanes,
- planes,
- groups=1,
- base_width=4,
- base_channels=64,
- radix=2,
- reduction_factor=4,
- avg_down_stride=True,
- **kwargs):
- """Bottleneck block for ResNeSt."""
- super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
- if groups == 1:
- width = self.planes
- else:
- width = math.floor(self.planes *
- (base_width / base_channels)) * groups
-
- self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
-
- self.norm1_name, norm1 = build_norm_layer(
- self.norm_cfg, width, postfix=1)
- self.norm3_name, norm3 = build_norm_layer(
- self.norm_cfg, self.planes * self.expansion, postfix=3)
-
- self.conv1 = build_conv_layer(
- self.conv_cfg,
- self.inplanes,
- width,
- kernel_size=1,
- stride=self.conv1_stride,
- bias=False)
- self.add_module(self.norm1_name, norm1)
- self.with_modulated_dcn = False
- self.conv2 = SplitAttentionConv2d(
- width,
- width,
- kernel_size=3,
- stride=1 if self.avg_down_stride else self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- radix=radix,
- reduction_factor=reduction_factor,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- dcn=self.dcn)
- delattr(self, self.norm2_name)
-
- if self.avg_down_stride:
- self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
-
- self.conv3 = build_conv_layer(
- self.conv_cfg,
- width,
- self.planes * self.expansion,
- kernel_size=1,
- bias=False)
- self.add_module(self.norm3_name, norm3)
-
- def forward(self, x):
-
- def _inner_forward(x):
- identity = x
-
- out = self.conv1(x)
- out = self.norm1(out)
- out = self.relu(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv1_plugin_names)
-
- out = self.conv2(out)
-
- if self.avg_down_stride:
- out = self.avd_layer(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv2_plugin_names)
-
- out = self.conv3(out)
- out = self.norm3(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv3_plugin_names)
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
-
- return out
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(_inner_forward, x)
- else:
- out = _inner_forward(x)
-
- out = self.relu(out)
-
- return out
-
-
-@BACKBONES.register_module()
-class ResNeSt(ResNetV1d):
- """ResNeSt backbone.
-
- Args:
- groups (int): Number of groups of Bottleneck. Default: 1
- base_width (int): Base width of Bottleneck. Default: 4
- radix (int): Radix of SpltAtConv2d. Default: 2
- reduction_factor (int): Reduction factor of inter_channels in
- SplitAttentionConv2d. Default: 4.
- avg_down_stride (bool): Whether to use average pool for stride in
- Bottleneck. Default: True.
- kwargs (dict): Keyword arguments for ResNet.
- """
-
- arch_settings = {
- 50: (Bottleneck, (3, 4, 6, 3)),
- 101: (Bottleneck, (3, 4, 23, 3)),
- 152: (Bottleneck, (3, 8, 36, 3)),
- 200: (Bottleneck, (3, 24, 36, 3))
- }
-
- def __init__(self,
- groups=1,
- base_width=4,
- radix=2,
- reduction_factor=4,
- avg_down_stride=True,
- **kwargs):
- self.groups = groups
- self.base_width = base_width
- self.radix = radix
- self.reduction_factor = reduction_factor
- self.avg_down_stride = avg_down_stride
- super(ResNeSt, self).__init__(**kwargs)
-
- def make_res_layer(self, **kwargs):
- """Pack all blocks in a stage into a ``ResLayer``."""
- return ResLayer(
- groups=self.groups,
- base_width=self.base_width,
- base_channels=self.base_channels,
- radix=self.radix,
- reduction_factor=self.reduction_factor,
- avg_down_stride=self.avg_down_stride,
- **kwargs)
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/model.py b/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/model.py
deleted file mode 100644
index 22488abd92182a878fa1bedadfed50afbb472d3e..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/model.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# coding: utf-8
-""" BigGAN PyTorch model.
- From "Large Scale GAN Training for High Fidelity Natural Image Synthesis"
- By Andrew Brocky, Jeff Donahuey and Karen Simonyan.
- https://openreview.net/forum?id=B1xsqj09Fm
-
- PyTorch version implemented from the computational graph of the TF Hub module for BigGAN.
- Some part of the code are adapted from https://github.com/brain-research/self-attention-gan
-
- This version only comprises the generator (since the discriminator's weights are not released).
- This version only comprises the "deep" version of BigGAN (see publication).
-
- Modified by Erik Härkönen:
- * Added support for per-layer latent vectors
-"""
-from __future__ import (absolute_import, division, print_function, unicode_literals)
-
-import os
-import logging
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .config import BigGANConfig
-from .file_utils import cached_path
-
-logger = logging.getLogger(__name__)
-
-PRETRAINED_MODEL_ARCHIVE_MAP = {
- 'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-pytorch_model.bin",
- 'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-pytorch_model.bin",
- 'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-pytorch_model.bin",
-}
-
-PRETRAINED_CONFIG_ARCHIVE_MAP = {
- 'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-config.json",
- 'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-config.json",
- 'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-config.json",
-}
-
-WEIGHTS_NAME = 'pytorch_model.bin'
-CONFIG_NAME = 'config.json'
-
-
-def snconv2d(eps=1e-12, **kwargs):
- return nn.utils.spectral_norm(nn.Conv2d(**kwargs), eps=eps)
-
-def snlinear(eps=1e-12, **kwargs):
- return nn.utils.spectral_norm(nn.Linear(**kwargs), eps=eps)
-
-def sn_embedding(eps=1e-12, **kwargs):
- return nn.utils.spectral_norm(nn.Embedding(**kwargs), eps=eps)
-
-class SelfAttn(nn.Module):
- """ Self attention Layer"""
- def __init__(self, in_channels, eps=1e-12):
- super(SelfAttn, self).__init__()
- self.in_channels = in_channels
- self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
- kernel_size=1, bias=False, eps=eps)
- self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
- kernel_size=1, bias=False, eps=eps)
- self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2,
- kernel_size=1, bias=False, eps=eps)
- self.snconv1x1_o_conv = snconv2d(in_channels=in_channels//2, out_channels=in_channels,
- kernel_size=1, bias=False, eps=eps)
- self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
- self.softmax = nn.Softmax(dim=-1)
- self.gamma = nn.Parameter(torch.zeros(1))
-
- def forward(self, x):
- _, ch, h, w = x.size()
- # Theta path
- theta = self.snconv1x1_theta(x)
- theta = theta.view(-1, ch//8, h*w)
- # Phi path
- phi = self.snconv1x1_phi(x)
- phi = self.maxpool(phi)
- phi = phi.view(-1, ch//8, h*w//4)
- # Attn map
- attn = torch.bmm(theta.permute(0, 2, 1), phi)
- attn = self.softmax(attn)
- # g path
- g = self.snconv1x1_g(x)
- g = self.maxpool(g)
- g = g.view(-1, ch//2, h*w//4)
- # Attn_g - o_conv
- attn_g = torch.bmm(g, attn.permute(0, 2, 1))
- attn_g = attn_g.view(-1, ch//2, h, w)
- attn_g = self.snconv1x1_o_conv(attn_g)
- # Out
- out = x + self.gamma*attn_g
- return out
-
-
-class BigGANBatchNorm(nn.Module):
- """ This is a batch norm module that can handle conditional input and can be provided with pre-computed
- activation means and variances for various truncation parameters.
-
- We cannot just rely on torch.batch_norm since it cannot handle
- batched weights (pytorch 1.0.1). We computate batch_norm our-self without updating running means and variances.
- If you want to train this model you should add running means and variance computation logic.
- """
- def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
- super(BigGANBatchNorm, self).__init__()
- self.num_features = num_features
- self.eps = eps
- self.conditional = conditional
-
- # We use pre-computed statistics for n_stats values of truncation between 0 and 1
- self.register_buffer('running_means', torch.zeros(n_stats, num_features))
- self.register_buffer('running_vars', torch.ones(n_stats, num_features))
- self.step_size = 1.0 / (n_stats - 1)
-
- if conditional:
- assert condition_vector_dim is not None
- self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
- self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
- else:
- self.weight = torch.nn.Parameter(torch.Tensor(num_features))
- self.bias = torch.nn.Parameter(torch.Tensor(num_features))
-
- def forward(self, x, truncation, condition_vector=None):
- # Retreive pre-computed statistics associated to this truncation
- coef, start_idx = math.modf(truncation / self.step_size)
- start_idx = int(start_idx)
- if coef != 0.0: # Interpolate
- running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
- running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
- else:
- running_mean = self.running_means[start_idx]
- running_var = self.running_vars[start_idx]
-
- if self.conditional:
- running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
- running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
-
- weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
- bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)
-
- out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
- else:
- out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
- training=False, momentum=0.0, eps=self.eps)
-
- return out
-
-
-class GenBlock(nn.Module):
- def __init__(self, in_size, out_size, condition_vector_dim, reduction_factor=4, up_sample=False,
- n_stats=51, eps=1e-12):
- super(GenBlock, self).__init__()
- self.up_sample = up_sample
- self.drop_channels = (in_size != out_size)
- middle_size = in_size // reduction_factor
-
- self.bn_0 = BigGANBatchNorm(in_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
- self.conv_0 = snconv2d(in_channels=in_size, out_channels=middle_size, kernel_size=1, eps=eps)
-
- self.bn_1 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
- self.conv_1 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
-
- self.bn_2 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
- self.conv_2 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
-
- self.bn_3 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
- self.conv_3 = snconv2d(in_channels=middle_size, out_channels=out_size, kernel_size=1, eps=eps)
-
- self.relu = nn.ReLU()
-
- def forward(self, x, cond_vector, truncation):
- x0 = x
-
- x = self.bn_0(x, truncation, cond_vector)
- x = self.relu(x)
- x = self.conv_0(x)
-
- x = self.bn_1(x, truncation, cond_vector)
- x = self.relu(x)
- if self.up_sample:
- x = F.interpolate(x, scale_factor=2, mode='nearest')
- x = self.conv_1(x)
-
- x = self.bn_2(x, truncation, cond_vector)
- x = self.relu(x)
- x = self.conv_2(x)
-
- x = self.bn_3(x, truncation, cond_vector)
- x = self.relu(x)
- x = self.conv_3(x)
-
- if self.drop_channels:
- new_channels = x0.shape[1] // 2
- x0 = x0[:, :new_channels, ...]
- if self.up_sample:
- x0 = F.interpolate(x0, scale_factor=2, mode='nearest')
-
- out = x + x0
- return out
-
-class Generator(nn.Module):
- def __init__(self, config):
- super(Generator, self).__init__()
- self.config = config
- ch = config.channel_width
- condition_vector_dim = config.z_dim * 2
-
- self.gen_z = snlinear(in_features=condition_vector_dim,
- out_features=4 * 4 * 16 * ch, eps=config.eps)
-
- layers = []
- for i, layer in enumerate(config.layers):
- if i == config.attention_layer_position:
- layers.append(SelfAttn(ch*layer[1], eps=config.eps))
- layers.append(GenBlock(ch*layer[1],
- ch*layer[2],
- condition_vector_dim,
- up_sample=layer[0],
- n_stats=config.n_stats,
- eps=config.eps))
- self.layers = nn.ModuleList(layers)
-
- self.bn = BigGANBatchNorm(ch, n_stats=config.n_stats, eps=config.eps, conditional=False)
- self.relu = nn.ReLU()
- self.conv_to_rgb = snconv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, eps=config.eps)
- self.tanh = nn.Tanh()
-
- def forward(self, cond_vector, truncation):
- z = self.gen_z(cond_vector[0])
-
- # We use this conversion step to be able to use TF weights:
- # TF convention on shape is [batch, height, width, channels]
- # PT convention on shape is [batch, channels, height, width]
- z = z.view(-1, 4, 4, 16 * self.config.channel_width)
- z = z.permute(0, 3, 1, 2).contiguous()
-
- cond_idx = 1
- for i, layer in enumerate(self.layers):
- if isinstance(layer, GenBlock):
- z = layer(z, cond_vector[cond_idx], truncation)
- cond_idx += 1
- else:
- z = layer(z)
-
- z = self.bn(z, truncation)
- z = self.relu(z)
- z = self.conv_to_rgb(z)
- z = z[:, :3, ...]
- z = self.tanh(z)
- return z
-
-class BigGAN(nn.Module):
- """BigGAN Generator."""
-
- @classmethod
- def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
- if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
- model_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
- config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
- else:
- model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
- config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
-
- try:
- resolved_model_file = cached_path(model_file, cache_dir=cache_dir)
- resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
- except EnvironmentError:
- logger.error("Wrong model name, should be a valid path to a folder containing "
- "a {} file and a {} file or a model name in {}".format(
- WEIGHTS_NAME, CONFIG_NAME, PRETRAINED_MODEL_ARCHIVE_MAP.keys()))
- raise
-
- logger.info("loading model {} from cache at {}".format(pretrained_model_name_or_path, resolved_model_file))
-
- # Load config
- config = BigGANConfig.from_json_file(resolved_config_file)
- logger.info("Model config {}".format(config))
-
- # Instantiate model.
- model = cls(config, *inputs, **kwargs)
- state_dict = torch.load(resolved_model_file, map_location='cpu' if not torch.cuda.is_available() else None)
- model.load_state_dict(state_dict, strict=False)
- return model
-
- def __init__(self, config):
- super(BigGAN, self).__init__()
- self.config = config
- self.embeddings = nn.Linear(config.num_classes, config.z_dim, bias=False)
- self.generator = Generator(config)
- self.n_latents = len(config.layers) + 1 # one for gen_z + one per layer
-
- def forward(self, z, class_label, truncation):
- assert 0 < truncation <= 1
-
- if not isinstance(z, list):
- z = self.n_latents*[z]
-
- if isinstance(class_label, list):
- embed = [self.embeddings(l) for l in class_label]
- else:
- embed = self.n_latents*[self.embeddings(class_label)]
-
- assert len(z) == self.n_latents, f'Expected {self.n_latents} latents, got {len(z)}'
- assert len(embed) == self.n_latents, f'Expected {self.n_latents} class vectors, got {len(class_label)}'
-
- cond_vectors = [torch.cat((z, e), dim=1) for (z, e) in zip(z, embed)]
- z = self.generator(cond_vectors, truncation)
- return z
-
-
-if __name__ == "__main__":
- import PIL
- from .utils import truncated_noise_sample, save_as_images, one_hot_from_names
- from .convert_tf_to_pytorch import load_tf_weights_in_biggan
-
- load_cache = False
- cache_path = './saved_model.pt'
- config = BigGANConfig()
- model = BigGAN(config)
- if not load_cache:
- model = load_tf_weights_in_biggan(model, config, './models/model_128/', './models/model_128/batchnorms_stats.bin')
- torch.save(model.state_dict(), cache_path)
- else:
- model.load_state_dict(torch.load(cache_path))
-
- model.eval()
-
- truncation = 0.4
- noise = truncated_noise_sample(batch_size=2, truncation=truncation)
- label = one_hot_from_names('diver', batch_size=2)
-
- # Tests
- # noise = np.zeros((1, 128))
- # label = [983]
-
- noise = torch.tensor(noise, dtype=torch.float)
- label = torch.tensor(label, dtype=torch.float)
- with torch.no_grad():
- outputs = model(noise, label, truncation)
- print(outputs.shape)
-
- save_as_images(outputs)
diff --git a/spaces/Hallucinate/demo/taming/data/base.py b/spaces/Hallucinate/demo/taming/data/base.py
deleted file mode 100644
index e21667df4ce4baa6bb6aad9f8679bd756e2ffdb7..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/taming/data/base.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import bisect
-import numpy as np
-import albumentations
-from PIL import Image
-from torch.utils.data import Dataset, ConcatDataset
-
-
-class ConcatDatasetWithIndex(ConcatDataset):
- """Modified from original pytorch code to return dataset idx"""
- def __getitem__(self, idx):
- if idx < 0:
- if -idx > len(self):
- raise ValueError("absolute value of index should not exceed dataset length")
- idx = len(self) + idx
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- return self.datasets[dataset_idx][sample_idx], dataset_idx
-
-
-class ImagePaths(Dataset):
- def __init__(self, paths, size=None, random_crop=False, labels=None):
- self.size = size
- self.random_crop = random_crop
-
- self.labels = dict() if labels is None else labels
- self.labels["file_path_"] = paths
- self._length = len(paths)
-
- if self.size is not None and self.size > 0:
- self.rescaler = albumentations.SmallestMaxSize(max_size = self.size)
- if not self.random_crop:
- self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)
- else:
- self.cropper = albumentations.RandomCrop(height=self.size,width=self.size)
- self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])
- else:
- self.preprocessor = lambda **kwargs: kwargs
-
- def __len__(self):
- return self._length
-
- def preprocess_image(self, image_path):
- image = Image.open(image_path)
- if not image.mode == "RGB":
- image = image.convert("RGB")
- image = np.array(image).astype(np.uint8)
- image = self.preprocessor(image=image)["image"]
- image = (image/127.5 - 1.0).astype(np.float32)
- return image
-
- def __getitem__(self, i):
- example = dict()
- example["image"] = self.preprocess_image(self.labels["file_path_"][i])
- for k in self.labels:
- example[k] = self.labels[k][i]
- return example
-
-
-class NumpyPaths(ImagePaths):
- def preprocess_image(self, image_path):
- image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024
- image = np.transpose(image, (1,2,0))
- image = Image.fromarray(image, mode="RGB")
- image = np.array(image).astype(np.uint8)
- image = self.preprocessor(image=image)["image"]
- image = (image/127.5 - 1.0).astype(np.float32)
- return image
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/Upload.5d0148e8.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/Upload.5d0148e8.js
deleted file mode 100644
index e466eef365507ce3f3f55ae30d495651e33e7498..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/Upload.5d0148e8.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as J,i as M,s as N,p as P,e as D,a as Q,b as o,d as p,f as V,g as R,l as s,W as h,z as b,u as X,q as Y,r as Z,j as x,k as $,n as ee,A as le,F as te,K as y,I as ne}from"./index.396f4a72.js";function ie(t){let l,i,a,g,m,c,f,d,k,F;const _=t[14].default,r=P(_,t,t[13],null);return{c(){l=D("div"),r&&r.c(),i=Q(),a=D("input"),o(a,"class","hidden-upload hidden"),o(a,"type","file"),o(a,"accept",t[0]),a.multiple=g=t[4]==="multiple"||void 0,o(a,"webkitdirectory",m=t[4]==="directory"||void 0),o(a,"mozdirectory",c=t[4]==="directory"||void 0),o(l,"class",f="w-full cursor-pointer h-full items-center justify-center text-gray-400 md:text-xl "+(t[1]?"min-h-[10rem] md:min-h-[15rem] max-h-[15rem] xl:max-h-[18rem] 2xl:max-h-[20rem]":"")),p(l,"text-center",t[2]),p(l,"flex",t[3])},m(n,u){V(n,l,u),r&&r.m(l,null),R(l,i),R(l,a),t[22](a),d=!0,k||(F=[s(a,"change",t[8]),s(l,"drag",h(b(t[15]))),s(l,"dragstart",h(b(t[16]))),s(l,"dragend",h(b(t[17]))),s(l,"dragover",h(b(t[18]))),s(l,"dragenter",h(b(t[19]))),s(l,"dragleave",h(b(t[20]))),s(l,"drop",h(b(t[21]))),s(l,"click",t[7]),s(l,"drop",t[9]),s(l,"dragenter",t[6]),s(l,"dragleave",t[6])],k=!0)},p(n,[u]){r&&r.p&&(!d||u&8192)&&X(r,_,n,n[13],d?Z(_,n[13],u,null):Y(n[13]),null),(!d||u&1)&&o(a,"accept",n[0]),(!d||u&16&&g!==(g=n[4]==="multiple"||void 0))&&(a.multiple=g),(!d||u&16&&m!==(m=n[4]==="directory"||void 0))&&o(a,"webkitdirectory",m),(!d||u&16&&c!==(c=n[4]==="directory"||void 0))&&o(a,"mozdirectory",c),(!d||u&2&&f!==(f="w-full cursor-pointer h-full items-center justify-center text-gray-400 md:text-xl "+(n[1]?"min-h-[10rem] md:min-h-[15rem] max-h-[15rem] xl:max-h-[18rem] 2xl:max-h-[20rem]":"")))&&o(l,"class",f),u&6&&p(l,"text-center",n[2]),u&10&&p(l,"flex",n[3])},i(n){d||(x(r,n),d=!0)},o(n){$(r,n),d=!1},d(n){n&&ee(l),r&&r.d(n),t[22](null),k=!1,le(F)}}}function ae(t,l,i){let{$$slots:a={},$$scope:g}=l,{filetype:m=void 0}=l,{include_file_metadata:c=!0}=l,{dragging:f=!1}=l,{boundedheight:d=!0}=l,{center:k=!0}=l,{flex:F=!0}=l,{file_count:_="single"}=l,{disable_click:r=!1}=l,n;const u=te(),A=()=>{i(10,f=!f)},q=()=>{r||(i(5,n.value="",n),n.click())},j=e=>{let w=Array.from(e);if(!(!e.length||!window.FileReader)){_==="single"&&(w=[e[0]]);var z=[];w.forEach((U,G)=>{let v=new FileReader;v.readAsDataURL(U),v.onloadend=function(){z[G]=c?{name:U.name,size:U.size,data:this.result}:this.result,z.filter(H=>H!==void 0).length===e.length&&u("load",_=="single"?z[0]:z)}})}},E=e=>{const w=e.target;!w.files||j(w.files)},S=e=>{i(10,f=!1),e.dataTransfer?.files&&j(e.dataTransfer.files)};function T(e){y.call(this,t,e)}function C(e){y.call(this,t,e)}function I(e){y.call(this,t,e)}function K(e){y.call(this,t,e)}function L(e){y.call(this,t,e)}function O(e){y.call(this,t,e)}function W(e){y.call(this,t,e)}function B(e){ne[e?"unshift":"push"](()=>{n=e,i(5,n)})}return t.$$set=e=>{"filetype"in e&&i(0,m=e.filetype),"include_file_metadata"in e&&i(11,c=e.include_file_metadata),"dragging"in e&&i(10,f=e.dragging),"boundedheight"in e&&i(1,d=e.boundedheight),"center"in e&&i(2,k=e.center),"flex"in e&&i(3,F=e.flex),"file_count"in e&&i(4,_=e.file_count),"disable_click"in e&&i(12,r=e.disable_click),"$$scope"in e&&i(13,g=e.$$scope)},[m,d,k,F,_,n,A,q,E,S,f,c,r,g,a,T,C,I,K,L,O,W,B]}class de extends J{constructor(l){super(),M(this,l,ae,ie,N,{filetype:0,include_file_metadata:11,dragging:10,boundedheight:1,center:2,flex:3,file_count:4,disable_click:12})}}export{de as U};
-//# sourceMappingURL=Upload.5d0148e8.js.map
diff --git a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/models/vqgan.py b/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/models/vqgan.py
deleted file mode 100644
index faa659451e01aea3a08dbdb590e6d71cd7b1afc2..0000000000000000000000000000000000000000
--- a/spaces/HuangLab/CELL-E_2-Image_Prediction/taming/models/vqgan.py
+++ /dev/null
@@ -1,649 +0,0 @@
-import torch
-import torch.nn.functional as F
-import pytorch_lightning as pl
-
-from celle_taming_main import instantiate_from_config
-
-from taming.modules.diffusionmodules.model import Encoder, Decoder
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-from taming.modules.vqvae.quantize import GumbelQuantize
-from taming.modules.vqvae.quantize import EMAVectorQuantizer
-
-
-class VQModel(pl.LightningModule):
- def __init__(
- self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- remap=None,
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
- ):
- super().__init__()
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- self.quantize = VectorQuantizer(
- n_embed,
- embed_dim,
- beta=0.25,
- remap=remap,
- sane_index_shape=sane_index_shape,
- )
- self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
- self.image_key = image_key
- if colorize_nlabels is not None:
- assert type(colorize_nlabels) == int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- self.load_state_dict(sd, strict=False)
- print(f"Restored from {path}")
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant, emb_loss, info = self.quantize(h)
- return quant, emb_loss, info
-
- def decode(self, quant):
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
- def decode_code(self, code_b):
- quant_b = self.quantize.embed_code(code_b)
- dec = self.decode(quant_b)
- return dec
-
- def forward(self, input):
- quant, diff, _ = self.encode(input)
- dec = self.decode(quant)
- return dec, diff
-
- def get_input(self, batch, k):
-
- if k == "mixed":
- keys = ["nucleus", "target"]
- index = torch.randint(low=0, high=2, size=(1,), dtype=int).item()
- k = keys[index]
-
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
-
- # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
- return x
-
- def training_step(self, batch, batch_idx=None, optimizer_idx=0):
-
- if type(batch) == dict:
-
- x = self.get_input(batch, self.image_key)
-
- else:
- x = batch
-
- xrec, qloss = self(
- x,
- )
-
- if optimizer_idx == 0:
- # autoencode
- aeloss, log_dict_ae = self.loss(
- qloss,
- x,
- xrec,
- optimizer_idx,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="train",
- )
-
- self.log(
- "train/aeloss",
- aeloss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- self.log_dict(
- log_dict_ae,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return aeloss
-
- if optimizer_idx == 1:
- # discriminator
- discloss, log_dict_disc = self.loss(
- qloss,
- x,
- xrec,
- optimizer_idx,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="train",
- )
- self.log(
- "train/discloss",
- discloss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- self.log_dict(
- log_dict_disc,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return discloss
-
- def validation_step(self, batch, batch_idx):
-
- if type(batch) == dict:
-
- x = self.get_input(batch, self.image_key)
-
- else:
- x = batch
-
- xrec, qloss = self(x)
- aeloss, log_dict_ae = self.loss(
- qloss,
- x,
- xrec,
- 0,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val",
- )
-
- discloss, log_dict_disc = self.loss(
- qloss,
- x,
- xrec,
- 1,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val",
- )
- # rec_loss = log_dict_ae["val/rec_loss"]
- # self.log(
- # "val/rec_loss",
- # rec_loss,
- # prog_bar=True,
- # logger=True,
- # on_step=True,
- # on_epoch=True,
- # sync_dist=True,
- # )
- # self.log(
- # "val/aeloss",
- # aeloss,
- # prog_bar=True,
- # logger=True,
- # on_step=True,
- # on_epoch=True,
- # sync_dist=True,
- # )
-
- for key, value in log_dict_disc.items():
- if key in log_dict_ae:
- log_dict_ae[key].extend(value)
- else:
- log_dict_ae[key] = value
-
- self.log_dict(log_dict_ae, sync_dist=True)
- return self.log_dict
-
- def configure_optimizers(self):
- lr = self.learning_rate
- opt_ae = torch.optim.Adam(
- list(self.encoder.parameters())
- + list(self.decoder.parameters())
- + list(self.quantize.parameters())
- + list(self.quant_conv.parameters())
- + list(self.post_quant_conv.parameters()),
- lr=lr,
- betas=(0.5, 0.9),
- )
- opt_disc = torch.optim.Adam(
- self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)
- )
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- def log_images(self, batch, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- xrec, _ = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["inputs"] = x
- log["reconstructions"] = xrec
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0
- return x
-
-
-class VQSegmentationModel(VQModel):
- def __init__(self, n_labels, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.register_buffer("colorize", torch.randn(3, n_labels, 1, 1))
-
- def configure_optimizers(self):
- lr = self.learning_rate
- opt_ae = torch.optim.Adam(
- list(self.encoder.parameters())
- + list(self.decoder.parameters())
- + list(self.quantize.parameters())
- + list(self.quant_conv.parameters())
- + list(self.post_quant_conv.parameters()),
- lr=lr,
- betas=(0.5, 0.9),
- )
- return opt_ae
-
- def training_step(self, batch, batch_idx):
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="train")
- self.log_dict(
- log_dict_ae,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return aeloss
-
- def validation_step(self, batch, batch_idx):
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="val")
- self.log_dict(
- log_dict_ae,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- total_loss = log_dict_ae["val/total_loss"]
- self.log(
- "val/total_loss",
- total_loss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return aeloss
-
- @torch.no_grad()
- def log_images(self, batch, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- xrec, _ = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- # convert logits to indices
- xrec = torch.argmax(xrec, dim=1, keepdim=True)
- xrec = F.one_hot(xrec, num_classes=x.shape[1])
- xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["inputs"] = x
- log["reconstructions"] = xrec
- return log
-
-
-class VQNoDiscModel(VQModel):
- def __init__(
- self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- ):
- super().__init__(
- ddconfig=ddconfig,
- lossconfig=lossconfig,
- n_embed=n_embed,
- embed_dim=embed_dim,
- ckpt_path=ckpt_path,
- ignore_keys=ignore_keys,
- image_key=image_key,
- colorize_nlabels=colorize_nlabels,
- )
-
- def training_step(self, batch, batch_idx):
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
- # autoencode
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="train")
- output = pl.TrainResult(minimize=aeloss)
- output.log(
- "train/aeloss",
- aeloss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- )
- output.log_dict(
- log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True
- )
- return output
-
- def validation_step(self, batch, batch_idx):
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="val")
- rec_loss = log_dict_ae["val/rec_loss"]
- output = pl.EvalResult(checkpoint_on=rec_loss)
- output.log(
- "val/rec_loss",
- rec_loss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- )
- output.log(
- "val/aeloss",
- aeloss,
- prog_bar=True,
- logger=True,
- on_step=True,
- on_epoch=True,
- )
- output.log_dict(log_dict_ae)
-
- return output
-
- def configure_optimizers(self):
- optimizer = torch.optim.Adam(
- list(self.encoder.parameters())
- + list(self.decoder.parameters())
- + list(self.quantize.parameters())
- + list(self.quant_conv.parameters())
- + list(self.post_quant_conv.parameters()),
- lr=self.learning_rate,
- betas=(0.5, 0.9),
- )
- return optimizer
-
-
-class GumbelVQ(VQModel):
- def __init__(
- self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- temperature_scheduler_config,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- kl_weight=1e-8,
- remap=None,
- ):
-
- z_channels = ddconfig["z_channels"]
- super().__init__(
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=ignore_keys,
- image_key=image_key,
- colorize_nlabels=colorize_nlabels,
- monitor=monitor,
- )
-
- self.loss.n_classes = n_embed
- self.vocab_size = n_embed
-
- self.quantize = GumbelQuantize(
- z_channels,
- embed_dim,
- n_embed=n_embed,
- kl_weight=kl_weight,
- temp_init=1.0,
- remap=remap,
- )
-
- self.temperature_scheduler = instantiate_from_config(
- temperature_scheduler_config
- ) # annealing of temp
-
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-
- def temperature_scheduling(self):
- self.quantize.temperature = self.temperature_scheduler(self.global_step)
-
- def encode_to_prequant(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode_code(self, code_b):
- raise NotImplementedError
-
- def training_step(self, batch, batch_idx=None, optimizer_idx=0):
- self.temperature_scheduling()
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
-
- if optimizer_idx == 0:
- # autoencode
- aeloss, log_dict_ae = self.loss(
- qloss,
- x,
- xrec,
- optimizer_idx,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="train",
- )
-
- self.log_dict(
- log_dict_ae,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- self.log(
- "temperature",
- self.quantize.temperature,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return aeloss
-
- if optimizer_idx == 1:
- # discriminator
- discloss, log_dict_disc = self.loss(
- qloss,
- x,
- xrec,
- optimizer_idx,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="train",
- )
- self.log_dict(
- log_dict_disc,
- prog_bar=False,
- logger=True,
- on_step=True,
- on_epoch=True,
- sync_dist=True,
- )
- return discloss
-
- def validation_step(self, batch, batch_idx):
- x = self.get_input(batch, self.image_key)
- xrec, qloss = self(x)
- aeloss, log_dict_ae = self.loss(
- qloss,
- x,
- xrec,
- 0,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val",
- )
-
- discloss, log_dict_disc = self.loss(
- qloss,
- x,
- xrec,
- 1,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val",
- )
- rec_loss = log_dict_ae["val/rec_loss"]
- self.log(
- "val/rec_loss",
- rec_loss,
- prog_bar=True,
- logger=True,
- on_step=False,
- on_epoch=True,
- sync_dist=True,
- )
- self.log(
- "val/aeloss",
- aeloss,
- prog_bar=True,
- logger=True,
- on_step=False,
- on_epoch=True,
- sync_dist=True,
- )
- self.log_dict(log_dict_ae, sync_dist=True)
- self.log_dict(log_dict_disc, sync_dist=True)
- return self.log_dict
-
- def log_images(self, batch, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- # encode
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant, _, _ = self.quantize(h)
- # decode
- x_rec = self.decode(quant)
- log["inputs"] = x
- log["reconstructions"] = x_rec
- return log
-
-
-class EMAVQ(VQModel):
- def __init__(
- self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- remap=None,
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
- ):
- super().__init__(
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=ignore_keys,
- image_key=image_key,
- colorize_nlabels=colorize_nlabels,
- monitor=monitor,
- )
- self.quantize = EMAVectorQuantizer(
- n_embed=n_embed, embedding_dim=embed_dim, beta=0.25, remap=remap
- )
-
- def configure_optimizers(self):
- lr = self.learning_rate
- # Remove self.quantize from parameter list since it is updated via EMA
- opt_ae = torch.optim.Adam(
- list(self.encoder.parameters())
- + list(self.decoder.parameters())
- + list(self.quant_conv.parameters())
- + list(self.post_quant_conv.parameters()),
- lr=lr,
- betas=(0.5, 0.9),
- )
- opt_disc = torch.optim.Adam(
- self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)
- )
- return [opt_ae, opt_disc], []
diff --git a/spaces/ICML2022/OFA/fairseq/examples/fast_noisy_channel/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/fast_noisy_channel/__init__.py
deleted file mode 100644
index 9b248c3a24e12ad3da885a7f328c714942de2e6b..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/fast_noisy_channel/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from . import noisy_channel_translation # noqa
-from . import noisy_channel_sequence_generator # noqa
-from . import noisy_channel_beam_search # noqa
diff --git a/spaces/ICML2022/OFA/fairseq/examples/paraphraser/paraphrase.py b/spaces/ICML2022/OFA/fairseq/examples/paraphraser/paraphrase.py
deleted file mode 100644
index d3422fb3db9a381b73a854d2379df214ebe544a2..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/paraphraser/paraphrase.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python3 -u
-
-import argparse
-import fileinput
-import logging
-import os
-import sys
-
-from fairseq.models.transformer import TransformerModel
-
-
-logging.getLogger().setLevel(logging.INFO)
-
-
-def main():
- parser = argparse.ArgumentParser(description="")
- parser.add_argument("--en2fr", required=True, help="path to en2fr model")
- parser.add_argument(
- "--fr2en", required=True, help="path to fr2en mixture of experts model"
- )
- parser.add_argument(
- "--user-dir", help="path to fairseq examples/translation_moe/src directory"
- )
- parser.add_argument(
- "--num-experts",
- type=int,
- default=10,
- help="(keep at 10 unless using a different model)",
- )
- parser.add_argument(
- "files",
- nargs="*",
- default=["-"],
- help='input files to paraphrase; "-" for stdin',
- )
- args = parser.parse_args()
-
- if args.user_dir is None:
- args.user_dir = os.path.join(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # examples/
- "translation_moe",
- "src",
- )
- if os.path.exists(args.user_dir):
- logging.info("found user_dir:" + args.user_dir)
- else:
- raise RuntimeError(
- "cannot find fairseq examples/translation_moe/src "
- "(tried looking here: {})".format(args.user_dir)
- )
-
- logging.info("loading en2fr model from:" + args.en2fr)
- en2fr = TransformerModel.from_pretrained(
- model_name_or_path=args.en2fr,
- tokenizer="moses",
- bpe="sentencepiece",
- ).eval()
-
- logging.info("loading fr2en model from:" + args.fr2en)
- fr2en = TransformerModel.from_pretrained(
- model_name_or_path=args.fr2en,
- tokenizer="moses",
- bpe="sentencepiece",
- user_dir=args.user_dir,
- task="translation_moe",
- ).eval()
-
- def gen_paraphrases(en):
- fr = en2fr.translate(en)
- return [
- fr2en.translate(fr, inference_step_args={"expert": i})
- for i in range(args.num_experts)
- ]
-
- logging.info("Type the input sentence and press return:")
- for line in fileinput.input(args.files):
- line = line.strip()
- if len(line) == 0:
- continue
- for paraphrase in gen_paraphrases(line):
- print(paraphrase)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_task.py b/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_task.py
deleted file mode 100644
index 602ea737ed75a33fddf44dd859e999ecfce2730d..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/roberta/wsc/wsc_task.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import json
-import os
-import tempfile
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data import (
- Dictionary,
- IdDataset,
- ListDataset,
- NestedDictionaryDataset,
- NumelDataset,
- NumSamplesDataset,
- PadDataset,
- SortDataset,
- data_utils,
- encoders,
-)
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-from . import wsc_utils
-
-
-@register_task("wsc")
-class WSCTask(LegacyFairseqTask):
- """Task to finetune RoBERTa for Winograd Schemas."""
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- parser.add_argument(
- "data", metavar="DIR", help="path to data directory; we load Some things you can ask: |