diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py deleted file mode 100644 index 2a4bb41b016429d13debe94c67a76cc6112f154c..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/tests/test_api.py +++ /dev/null @@ -1,38 +0,0 @@ -import unittest -import requests -from unittest.mock import MagicMock -from gpt4free.quora.api import retry_request - - -class TestRetryRequest(unittest.TestCase): - def test_successful_request(self): - # Mock a successful request with a 200 status code - mock_response = MagicMock() - mock_response.status_code = 200 - requests.get = MagicMock(return_value=mock_response) - - # Call the function and assert that it returns the response - response = retry_request(requests.get, "http://example.com", max_attempts=3) - self.assertEqual(response.status_code, 200) - - def test_exponential_backoff(self): - # Mock a failed request that succeeds after two retries - mock_response = MagicMock() - mock_response.status_code = 200 - requests.get = MagicMock(side_effect=[requests.exceptions.RequestException] * 2 + [mock_response]) - - # Call the function and assert that it retries with exponential backoff - with self.assertLogs() as logs: - response = retry_request(requests.get, "http://example.com", max_attempts=3, delay=1) - self.assertEqual(response.status_code, 200) - self.assertGreaterEqual(len(logs.output), 2) - self.assertIn("Retrying in 1 seconds...", logs.output[0]) - self.assertIn("Retrying in 2 seconds...", logs.output[1]) - - def test_too_many_attempts(self): - # Mock a failed request that never succeeds - requests.get = MagicMock(side_effect=requests.exceptions.RequestException) - - # Call the function and assert that it raises an exception after the maximum number of attempts - with self.assertRaises(RuntimeError): - retry_request(requests.get, "http://example.com", max_attempts=3) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md deleted file mode 100644 index aab03fd505bf0c333f95d5c4322e307917bbcae7..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage How to Get the Most Out of It.md +++ /dev/null @@ -1,101 +0,0 @@ - -

Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage: A Comprehensive Guide

-

Adobe Photoshop is the most popular and powerful image editing software in the world. It allows you to create, edit, and enhance photos, graphics, and designs with a variety of tools and features. Whether you are a professional designer, photographer, or hobbyist, Adobe Photoshop can help you achieve your creative vision.

-

In this article, we will introduce you to Adobe Photoshop CC 2014, the latest version of the software that was released in June 2014. We will explain what Adobe Photoshop CC 2014 is, what are its main features, how to install and activate it, and how to use it for design and photography. By the end of this article, you will have a better understanding of Adobe Photoshop CC 2014 and how to make the most of it.

-

Adobe Photoshop CC 2014 [32 64 Bit] Activation Multilanguage


DOWNLOAD ===== https://byltly.com/2uKze0



-

What is Adobe Photoshop CC 2014?

-

Adobe Photoshop CC 2014 is the fifteenth major release of Adobe Photoshop, which is part of the Adobe Creative Cloud subscription service. It is also known as Adobe Photoshop 15 or Adobe Photoshop 2014. It is available for Windows and Mac OS X operating systems, and it supports both 32-bit and 64-bit architectures.

-

The main features of Adobe Photoshop CC 2014

-

Adobe Photoshop CC 2014 introduces several new features and enhancements that improve the performance, functionality, and usability of the software. Some of the most notable new features are:

- -

The system requirements for Adobe Photoshop CC 2014

-

To run Adobe Photoshop CC 2014 smoothly on your computer, you need to meet the following minimum system requirements:

Operating system | Processor | RAM | Hard disk space | Graphics card
Windows 7 SP1 or later (32-bit or 64-bit) | Intel Pentium 4 or AMD Athlon 64 processor (2 GHz or faster) | 2 GB (8 GB recommended) | 2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on removable flash storage devices) | 1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system
Mac OS X v10.7 or later (64-bit only) | Multicore Intel processor with 64-bit support | 2 GB (8 GB recommended) | 3.2 GB of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices) | 1024 x 768 display (1280 x 800 recommended) with OpenGL® 2.0–capable system
-

How to install and activate Adobe Photoshop CC 2014?

-

To install and activate Adobe Photoshop CC 2014 on your computer, you need to follow these steps:

-

Adobe Photoshop CC 2014 crack download
-Adobe Photoshop CC 2014 multilingual portable
-Adobe Photoshop CC 2014 serial number
-Adobe Photoshop CC 2014 offline installer
-Adobe Photoshop CC 2014 free trial
-Adobe Photoshop CC 2014 full version
-Adobe Photoshop CC 2014 keygen
-Adobe Photoshop CC 2014 system requirements
-Adobe Photoshop CC 2014 tutorial
-Adobe Photoshop CC 2014 update
-Adobe Photoshop CC 2014 features
-Adobe Photoshop CC 2014 license key
-Adobe Photoshop CC 2014 activation code
-Adobe Photoshop CC 2014 patch
-Adobe Photoshop CC 2014 direct download link
-Adobe Photoshop CC 2014 torrent
-Adobe Photoshop CC 2014 mac
-Adobe Photoshop CC 2014 windows
-Adobe Photoshop CC 2014 x64 bit
-Adobe Photoshop CC 2014 x32 bit
-Adobe Photoshop CC 2014 latest version
-Adobe Photoshop CC 2014 review
-Adobe Photoshop CC 2014 tips and tricks
-Adobe Photoshop CC 2014 plugins
-Adobe Photoshop CC 2014 brushes
-Adobe Photoshop CC 2014 presets
-Adobe Photoshop CC 2014 filters
-Adobe Photoshop CC 2014 actions
-Adobe Photoshop CC 2014 fonts
-Adobe Photoshop CC 2014 tools
-Adobe Photoshop CC 2014 shortcuts
-Adobe Photoshop CC 2014 layers
-Adobe Photoshop CC 2014 masks
-Adobe Photoshop CC 2014 smart objects
-Adobe Photoshop CC 2014 adjustment layers
-Adobe Photoshop CC 2014 blending modes
-Adobe Photoshop CC 2014 selection tools
-Adobe Photoshop CC 2014 transform tools
-Adobe Photoshop CC 2014 crop tool
-Adobe Photoshop CC 2014 healing tools
-Adobe Photoshop CC 2014 clone stamp tool
-Adobe Photoshop CC 2014 pen tool
-Adobe Photoshop CC 2014 text tool
-Adobe Photoshop CC 2014 shape tool
-Adobe Photoshop CC 2014 gradient tool
-Adobe Photoshop CC 2014 paint bucket tool
-Adobe Photoshop CC 2014 eraser tool
-Adobe Photoshop CC 2014 dodge and burn tools
-Adobe Photoshop CC 2014 sponge tool

-

Downloading the setup files

-

You can download the setup files for Adobe Photoshop CC 2014 from the official website of Adobe or from other trusted sources online. Make sure you download the correct version for your operating system and architecture (32-bit or 64-bit). The setup files are usually compressed in ZIP or RAR format, so you need to extract them before installing.
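If the download is a ZIP archive, Python's standard zipfile module is one way to unpack it; RAR files instead need an external extractor such as 7-Zip or WinRAR. A minimal sketch, assuming hypothetical file names:

```python
import zipfile
from pathlib import Path

# Hypothetical names for illustration; use the archive you actually downloaded.
archive = Path("Photoshop_CC_2014_setup.zip")
target = Path("Photoshop_CC_2014_setup")

with zipfile.ZipFile(archive) as zf:
    zf.extractall(target)  # unpack every member into the target folder
    print(f"Extracted {len(zf.namelist())} files to {target}")
```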

-

Installing Adobe Photoshop CC 2014

-

To install Adobe Photoshop CC 2014 on your computer, you need to run the setup.exe file that you extracted from the downloaded file. Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation is finished.

-

Activating Adobe Photoshop CC 2014 with a serial number or a patch

-

To activate Adobe Photoshop CC 2014 on your computer, you need to have a valid serial number or a patch that can bypass the activation process. A serial number is a unique code that identifies your license for using the software. A patch is a small program that modifies the original software code to remove the activation requirement.

-

You can obtain a serial number or a patch from various sources online, such as forums, blogs, or websites that offer cracked software. However, be careful when downloading these files as they may contain viruses or malware that can harm your computer. Also, using cracked software is illegal and unethical as it violates the terms and conditions of Adobe.

-

If you have a serial number for Adobe Photoshop CC 2014, you can enter it when prompted during the installation process or after launching the software for the first time. If you have a patch for Adobe Photoshop CC

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md deleted file mode 100644 index fd1b337e795a23e63573ed3fd0482a0f5ff547c7..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/E Elio Le Story Tese Torrent.md +++ /dev/null @@ -1,94 +0,0 @@ -
-

E Elio Le Story Tese Torrent: How to Download Their Music for Free

- -

Elio e le Storie Tese is an Italian comedy rock band that was formed in 1980. The band is known for their humorous and satirical lyrics, their eclectic musical style, and their live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations. Some of their most popular songs are "La terra dei cachi", "Mio cuggino", "Born to Be Abramo", and "La canzone mononota".

- -

If you are a fan of Elio e le Storie Tese and you want to download their music for free, you might be tempted to use a torrent site or a file-sharing platform that hosts pirated copies of their albums. However, this is not a legal or safe way to get their music: you would be breaking the law, violating the rights of the band and their record label, and exposing yourself to malware or legal penalties.

-

E Elio Le Story Tese Torrent


Download –––––>>> https://imgfil.com/2uxYMw



- -

The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. Some of the popular streaming platforms that have Elio e le Storie Tese in their library are:

- - - -

All these streaming services offer high-quality audio, as well as various payment options and customer support. However, they may not be available in all countries or regions, so you should check their availability and pricing before choosing one.

- -

How to Download E Elio Le Story Tese Torrent Illegally

- -

If you still want to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved. Some of the notorious websites that offer Elio e le Storie Tese torrents are:

- - - -

These websites claim to provide free and fast downloads of E Elio Le Story Tese Torrent files in various formats and bitrates. However, they are not authorized by the original creators or distributors of the music, and they violate the copyright laws and intellectual property rights of the music industry. Moreover, they are unsafe to use, as they may contain malware or viruses that can infect your device or steal your personal information. They may also expose you to legal troubles or penalties if you are caught downloading or sharing pirated content.

- -

Conclusion

- -

Elio e le Storie Tese is a band that will appeal to fans of comedy rock and Italian music. If you want to download their music for free, you have several options online, but not all of them are legal or safe. The best way to download E Elio Le Story Tese Torrent legally and safely is to use a streaming service that offers their music in your preferred language and region. However, if you choose to download E Elio Le Story Tese Torrent illegally, you should be aware of the risks and consequences involved.

- -

In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally or illegally. We hope you have enjoyed reading this article and found it useful. Now go ahead and download E Elio Le Story Tese Torrent and enjoy their music!

-

-

Why Elio e le Storie Tese is a Unique Band

- -

Elio e le Storie Tese is not just a comedy rock band, but also a cultural phenomenon in Italy. The band has been praised for their originality, creativity, and versatility. They have experimented with various genres and styles, such as pop, rock, jazz, funk, metal, classical, folk, rap, and more. They have also collaborated with many famous artists and personalities, such as Luciano Pavarotti, Ennio Morricone, Giorgio Moroder, Renato Zero, Jovanotti, and Fabio Fazio.

- -

Elio e le Storie Tese is also known for their social and political satire, their parody of Italian stereotypes and clichés, and their criticism of Italian society and media. The band has often used irony, sarcasm, absurdity, and nonsense to convey their messages and opinions. They have also created many fictional characters and alter egos, such as Rocco Tanica, Faso, Cesareo, Mangoni, Feiez, Elio Samaga Hukapan Kariyana Turu (the Sri Lankan version of Elio), and Il Complesso Misterioso (a fake band that competed in the Sanremo Music Festival).

- -

Elio e le Storie Tese is a band that has influenced many other artists and comedians in Italy and abroad. They have also received many awards and recognitions for their music and career. They have been nominated for several MTV Europe Music Awards and Italian Music Awards. They have also won the Critics' Award at the Sanremo Music Festival twice (in 1996 and 2013). In 2016, they announced their farewell tour, which ended in 2018 with a final concert in Milan.

- -

How to Support Elio e le Storie Tese

- -

If you love Elio e le Storie Tese and you want to support them, you can do so in various ways. Here are some suggestions:

- - - -

By supporting Elio e le Storie Tese, you are not only showing your appreciation for their music and artistry but also contributing to their legacy and impact on Italian culture and society.

-

How to Discover More About Elio e le Storie Tese

- -

If you are curious about Elio e le Storie Tese and you want to discover more about their music and history, you can do so in various ways. Here are some suggestions:

- - - -

By discovering more about Elio e le Storie Tese, you are not only enriching your knowledge and appreciation for their music and artistry but also joining their loyal and passionate fan community.

- -

How to Share E Elio Le Story Tese Torrent with Others

- -

If you love E Elio Le Story Tese Torrent and you want to share it with others, you can do so in various ways. Here are some suggestions:

- - - -

By sharing E Elio Le Story Tese Torrent with others, you are not only spreading your love and enthusiasm for their music and artistry but also supporting their career and success.

-

Conclusion

- -

E Elio Le Story Tese Torrent is a keyword that refers to the illegal and unsafe way of downloading the music of Elio e le Storie Tese, an Italian comedy rock band that has been entertaining audiences since 1980. The band is known for their witty and satirical lyrics, their eclectic musical style, and their energetic live performances. The band has released 14 studio albums, 5 live albums, and several singles and compilations.

- -

In this article, we have provided you with some information and tips on how to download E Elio Le Story Tese Torrent legally and safely, how to discover more about Elio e le Storie Tese, and how to share their music with others. We hope you have enjoyed reading this article and found it useful. Now go ahead and enjoy E Elio Le Story Tese Torrent online!

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md b/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md deleted file mode 100644 index 7cbab362d0d231d19654830830050385a0796eb7..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download attack on titan mod APK for Android - Free and Easy.md +++ /dev/null @@ -1,126 +0,0 @@ -
-

Attack on Titan Mod Free Download in APKPure

-

If you are a fan of the popular anime and manga series Attack on Titan, you might be interested in playing the game based on it. However, if you want to enjoy some extra features and enhancements, you might want to try the mod version of the game. In this article, we will show you how to download and install Attack on Titan mod free from APKPure, one of the best sources for Android apps and games.

-

attack on titan mod free download in apkpure


Download Ziphttps://jinyurl.com/2uNMLd



-

What is Attack on Titan?

-

Attack on Titan is a Japanese manga series written and illustrated by Hajime Isayama. It is set in a world where humanity lives inside cities surrounded by three enormous walls that protect them from gigantic man-eating humanoids referred to as Titans. The story follows Eren Yeager, who vows to exterminate the Titans after they bring about the destruction of his hometown and the death of his mother.

-

The manga series has been adapted into an anime television series, which has four seasons so far. The anime has received critical acclaim and commercial success and has won several awards, while the manga has become one of the best-selling manga series of all time.

-

The game based on the anime series is called Attack on Titan / A.O.T. Wings of Freedom. It is an action hack-and-slash game that lets you play as one of the beloved characters from the series. You can use the Three Dimensional Maneuver Gear to fly around and fight the Titans, follow the story from the anime with some original twists, and experience the thrill of stepping into its world.

-

attack on titan mod apk download for android
-attack on titan mod minecraft pe free download
-attack on titan mod apk unlimited money and gems
-attack on titan mod menu apk download latest version
-attack on titan mod for gta san andreas free download
-attack on titan mod apk offline no root
-attack on titan mod pack for minecraft java edition
-attack on titan mod apk rexdl
-attack on titan mod among us free download
-attack on titan mod apk obb highly compressed
-attack on titan mod for roblox free download
-attack on titan mod apk unlimited everything
-attack on titan mod for skyrim special edition
-attack on titan mod apk happymod
-attack on titan mod for gta 5 pc free download
-attack on titan mod apk revdl
-attack on titan mod for sims 4 free download
-attack on titan mod apk all characters unlocked
-attack on titan mod for fallout 4 xbox one
-attack on titan mod apk android 1
-attack on titan mod for terraria free download
-attack on titan mod apk god mode
-attack on titan mod for left 4 dead 2 free download
-attack on titan mod apk unlimited coins and diamonds
-attack on titan mod for stardew valley free download
-attack on titan mod apk no ads
-attack on titan mod for ark survival evolved free download
-attack on titan mod apk unlimited health and stamina
-attack on titan mod for subnautica free download
-attack on titan mod apk latest update
-attack on titan mod for starbound free download
-attack on titan mod apk no verification
-attack on titan mod for mount and blade warband free download
-attack on titan mod apk unlimited blades and gas
-attack on titan mod for rimworld free download
-attack on titan mod apk no human verification
-attack on titan mod for dragon age inquisition free download
-attack on titan mod apk unlimited skills and items
-attack on titan mod for xcom 2 free download
-attack on titan mod apk one hit kill
-attack on titan mod for witcher 3 free download
-attack on titan mod apk online multiplayer
-attack on titan mod for dark souls 3 free download
-attack on titan mod apk all episodes unlocked
-attack on titan mod for dying light free download
-attack on titan mod apk original version
-attack on titan mod for just cause 3 free download

-

Some of the main features and characters of the game are:

- -

What is APKPure?

-

APKPure is a website that offers APK files for Android apps and games. APK stands for Android Package Kit, which is a file format that contains all the elements needed to install an app or game on your Android device. Normally, you would download apps and games from the Google Play Store, which is the official source for Android apps. However, there are some reasons why you might want to use APKPure instead.
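Because an APK is essentially a ZIP archive with a fixed internal layout, you can peek inside one with a few lines of Python. A minimal sketch, assuming a hypothetical file name:

```python
import zipfile

# Hypothetical path for illustration; point this at a real .apk file.
apk_path = "attack_on_titan_mod.apk"

with zipfile.ZipFile(apk_path) as apk:
    for name in apk.namelist()[:10]:  # show only the first few entries
        print(name)  # typically AndroidManifest.xml, classes.dex, res/..., etc.
```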

-

Some of the benefits of using APKPure are:

- -

However, there are also some risks of using APKPure that you should be aware of:

- -

Therefore, you should always be cautious when using APKPure or any other third-party source for Android apps and games. Check the ratings, reviews, and permissions of a file before downloading it, scan it with reliable antivirus software before installing it, and back up your data and device before trying any new app or game.
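One check that is easy to automate is computing a download's SHA-256 checksum and comparing it against the value published on the download page, when one is provided. A minimal sketch, assuming a hypothetical file name:

```python
import hashlib

# Hypothetical file name for illustration; point this at your actual download.
path = "attack_on_titan_mod.apk"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash the file in 1 MiB chunks
        sha256.update(chunk)

# Compare this digest against the checksum published alongside the download.
print(sha256.hexdigest())
```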

-

How to download and install Attack on Titan mod in APKPure?

-

If you want to try the mod version of Attack on Titan / A.O.T. Wings of Freedom, which has some extra features such as unlimited money, unlocked characters, and more, you can download it from the APKPure website. Here are the steps to download and install Attack on Titan mod in APKPure (an adb-based alternative is sketched after the list):

-
    -
  1. Go to the APKPure website and search for Attack on Titan mod or click on this link: [Attack on Titan Mod APK 1.1.2.12 - Download Attack on Titan Mod for Android]
  2. Click on the green Download APK button and wait for the file to be downloaded to your device
  3. Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store
  4. Locate the downloaded APK file in your device storage and tap on it to start the installation process
  5. Follow the instructions on the screen and grant the necessary permissions to the app
  6. Wait for the installation to finish and then launch the app from your app drawer or home screen
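If you prefer installing from a computer, adb can sideload the same APK over USB. A minimal sketch, assuming adb is installed, USB debugging is enabled on the device, and the file name is hypothetical:

```python
import subprocess

# Hypothetical APK path for illustration; adb and USB debugging are assumed to be set up.
apk_path = "attack_on_titan_mod.apk"

# "adb install -r" installs the APK, replacing any existing version of the app.
result = subprocess.run(["adb", "install", "-r", apk_path],
                        capture_output=True, text=True)
print(result.stdout or result.stderr)
```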
-

Congratulations! You have successfully downloaded and installed Attack on Titan mod in APKPure. You can now enjoy playing the game with some extra features and enhancements.

-

Conclusion

-

In this article, we have shown you how to download and install Attack on Titan mod for free from APKPure, one of the best-known sources for Android apps and games. We have also explained what Attack on Titan is, what APKPure is, and what the benefits and risks of using APKPure are. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

-

If you liked this article, please share it with your friends and family who might be interested in playing Attack on Titan mod. And if you want to read more articles like this, please subscribe to our newsletter or follow us on social media. Thank you for reading!

-

FAQs

-

What is an APK file and why do I need it?

-

An APK file is an Android application package that contains all the elements needed to install an app or game on your device. You need an APK file when you want to install an app or game that is not available in the Google Play Store or that is not compatible with your device or Android version.

-

Is APKPure safe and reliable?

-

APKPure is one of the most popular and trusted websites that offer APK files for Android apps and games. It has millions of users and thousands of positive reviews. However, like any other third-party source, it carries some risk of hosting malicious or harmful files that can damage your device or steal your data. Therefore, you should always be cautious when using APKPure or any other third-party source for Android apps and games.

-

What are the requirements and compatibility of Attack on Titan mod?

-

The requirements and compatibility of Attack on Titan mod are as follows:

- -

What are the features and advantages of Attack on Titan mod?

-

The features and advantages of Attack on Titan mod are as follows:

- -

How can I update or uninstall Attack on Titan mod?

-

If you want to update or uninstall Attack on Titan mod, you can follow these steps:

-
    -
  1. To update the app, download the latest version of the APK file from the APKPure website and install it over the existing app. You don't need to uninstall the previous version, but you should back up your data before updating
  2. To uninstall the app, go to your device settings and find the app in the list of installed apps. Then tap on the app and select the option to uninstall it. You should also delete the APK file from your device storage

-
-
\ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py deleted file mode 100644 index 4b26f6ba130822c2de9d0a0a91e0cb1a9e6d79f9..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/txt_processors/zh.py +++ /dev/null @@ -1,117 +0,0 @@ -import re -import jieba -from pypinyin import pinyin, Style -from text_to_speech.utils.text.text_norm import NSWNormalizer -from text_to_speech.data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors -from text_to_speech.utils.text.text_encoder import PUNCS, is_sil_phoneme - -ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', - 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'] - - -@register_txt_processors('zh') -class TxtProcessor(BaseTxtProcessor): - table = {ord(f): ord(t) for f, t in zip( - u':,。!?【】()%#@&1234567890', - u':,.!?[]()%#@&1234567890')} - - @staticmethod - def sp_phonemes(): - return ['|', '#'] - - @staticmethod - def preprocess_text(text): - text = text.translate(TxtProcessor.table) - text = NSWNormalizer(text).normalize(remove_punc=False).lower() - text = re.sub("[\'\"()]+", "", text) - text = re.sub("[-]+", " ", text) - text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text) - text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> ! - text = re.sub(f"([{PUNCS}])", r" \1 ", text) - text = re.sub(rf"\s+", r"", text) - text = re.sub(rf"[A-Za-z]+", r"$", text) - return text - - @classmethod - def pinyin_with_en(cls, txt, style): - x = pinyin(txt, style) - x = [t[0] for t in x] - x_ = [] - for t in x: - if '$' not in t: - x_.append(t) - else: - x_ += list(t) - x_ = [t if t != '$' else 'ENG' for t in x_] - return x_ - - @classmethod - def process(cls, txt, pre_align_args): - txt = cls.preprocess_text(txt) - txt = txt.replace("嗯", "蒽") # pypin会把嗯的声母韵母识别为'',导致ph2word出现错位。 - # https://blog.csdn.net/zhoulei124/article/details/89055403 - - shengmu = cls.pinyin_with_en(txt, style=Style.INITIALS) - yunmu = cls.pinyin_with_en(txt, style= - Style.FINALS_TONE3 if pre_align_args['use_tone'] else Style.FINALS) - assert len(shengmu) == len(yunmu) - for i in range(len(shengmu)): - if shengmu[i] == '' and yunmu[i] == '': - print(f"发现了一个声母韵母都是空的文字:{txt[i]}") - ph_list = [] - for a, b in zip(shengmu, yunmu): - if a == b: - ph_list += [a] - else: - ph_list += [a + "%" + b] - seg_list = '#'.join(jieba.cut(txt)) - assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list) - - # 加入词边界'#' - ph_list_ = [] - seg_idx = 0 - for p in ph_list: - if seg_list[seg_idx] == '#': - ph_list_.append('#') - seg_idx += 1 - elif len(ph_list_) > 0: - ph_list_.append("|") - seg_idx += 1 - finished = False - if not finished: - ph_list_ += [x for x in p.split("%") if x != ''] - - ph_list = ph_list_ - - # 去除静音符号周围的词边界标记 [..., '#', ',', '#', ...] 
- sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes() - ph_list_ = [] - for i in range(0, len(ph_list), 1): - if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes): - ph_list_.append(ph_list[i]) - ph_list = ph_list_ - - txt_struct = [[w, []] for w in txt] - i = 0 - for ph in ph_list: - if ph == '|' or ph == '#': - i += 1 - continue - # elif ph in [',', '.']: - elif ph in [',', '.', '?', '!', ':']: - i += 1 - txt_struct[i][1].append(ph) - i += 1 - continue - txt_struct[i][1].append(ph) - # return ph_list, txt - txt_struct.insert(0, ['', ['']]) - txt_struct.append(['', ['']]) - return txt_struct, txt - - -if __name__ == '__main__': - # t = 'simon演唱过后,simon还进行了simon精彩的文艺演出simon.' - t = '你当我傻啊?脑子那么大怎么塞进去???' - phs, txt = TxtProcessor.process(t, {'use_tone': True}) - print(phs, txt) diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py deleted file mode 100644 index 5fc15bf9cfe0111a910e7de33d04ffdec3877576..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/x_transformer.py +++ /dev/null @@ -1,641 +0,0 @@ -"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat, reduce - -# constants - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates' -]) - - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.emb = nn.Embedding(max_seq_len, dim) - self.init_() - - def init_(self): - nn.init.normal_(self.emb.weight, std=0.02) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - return self.emb(n)[None, :, :] - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return emb[None, :, :] - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def always(val): - def inner(*args, **kwargs): - return val - return inner - - -def not_equals(val): - def inner(x): - return x != val - return inner - - -def equals(val): - def inner(x): - return x == val - return inner - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# classes -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.value, *rest) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.g, *rest) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class Residual(nn.Module): - def forward(self, x, residual): - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - - def forward(self, x, residual): - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# feedforward - -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - 
project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -# attention. -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - mask=None, - talking_heads=False, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False - ): - super().__init__() - if use_entmax15: - raise NotImplementedError("Check out entmax activation instead of softmax activation!") - self.scale = dim_head ** -0.5 - self.heads = heads - self.causal = causal - self.mask = mask - - inner_dim = dim_head * heads - - self.to_q = nn.Linear(dim, inner_dim, bias=False) - self.to_k = nn.Linear(dim, inner_dim, bias=False) - self.to_v = nn.Linear(dim, inner_dim, bias=False) - self.dropout = nn.Dropout(dropout) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - #self.attn_fn = entmax15 if use_entmax15 else F.softmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - rel_pos=None, - sinusoidal_emb=None, - prev_attn=None, - mem=None - ): - b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots - - if talking_heads: - dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if exists(rel_pos): - dots = rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rmsnorm=False, - use_rezero=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - position_infused_attn=False, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - - self.has_pos_emb = position_infused_attn - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - self.rotary_pos_emb = always(None) - - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = None - - self.pre_norm = pre_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if isinstance(layer, Attention) and exists(branch_fn): - layer = branch_fn(layer) - - if gate_residual: - residual_fn = GRUGating(dim) - else: - residual_fn = Residual() - - self.layers.append(nn.ModuleList([ - norm_fn(), - layer, - residual_fn - ])) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - mems=None, - return_hiddens=False - ): - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - is_last = ind == (len(self.layers) - 1) - - if layer_type == 'a': - hiddens.append(x) - layer_mem = mems.pop(0) - - residual = x - - if self.pre_norm: - x = norm(x) - - if layer_type == 'a': - out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, - prev_attn=prev_attn, mem=layer_mem) - elif layer_type == 'c': - out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) - elif layer_type == 'f': - out = block(x) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if not self.pre_norm and not is_last: - x = norm(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.num_tokens = num_tokens - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): - attn_layers.num_memory_tokens = num_memory_tokens - - def init_(self): - nn.init.normal_(self.token_emb.weight, std=0.02) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_mems=False, - return_attn=False, - mems=None, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x += self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_mems: - hiddens = intermediates.hiddens - new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens - new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) - return out, new_mems - - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - return out, attn_maps - - return out - diff --git a/spaces/AIGText/GlyphControl/annotator/render_images.py b/spaces/AIGText/GlyphControl/annotator/render_images.py deleted file mode 100644 index 2028212504c526fb3e042177b7bbe4c6a4f8e287..0000000000000000000000000000000000000000 --- a/spaces/AIGText/GlyphControl/annotator/render_images.py +++ /dev/null @@ -1,95 +0,0 @@ -from PIL import Image, ImageFont, ImageDraw -import random - -# resize height to image_height first, then shrink or pad to image_width -def resize_and_pad_image(pil_image, image_size): - - if isinstance(image_size, (tuple, list)) and len(image_size) == 2: - image_width, image_height = image_size - elif isinstance(image_size, int): - image_width = image_height = image_size - else: - raise ValueError(f"Image size should be int or list/tuple of int not {image_size}") - - while pil_image.size[1] >= 2 * image_height: - pil_image = pil_image.resize( - tuple(x // 2 for x in pil_image.size), resample=Image.BOX - ) - - scale = image_height / pil_image.size[1] - pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size),resample=Image.BICUBIC) - - # shrink - if pil_image.size[0] > image_width: - pil_image = pil_image.resize((image_width, image_height),resample=Image.BICUBIC) - - # padding - if pil_image.size[0] < image_width: - img = Image.new(mode="RGB",size=(image_width,image_height), color="white") - width, _ = pil_image.size - img.paste(pil_image,((image_width - width)//2, 0)) - pil_image = img - - return pil_image - -def render_text_image_custom(image_size, bboxes, rendered_txt_values, num_rows_values, align = "center"): - # aligns = ["center", "left", "right"] - """Render text image based on the list of bbox called `bboxes`. - Support font that can be choosed. 
- """ - print(image_size, bboxes, rendered_txt_values, num_rows_values, align) - background = Image.new("RGB", image_size, "white") - font = ImageFont.truetype("calibri.ttf", encoding='utf-8', size=512) - - for text, bbox, num_rows in zip(rendered_txt_values, bboxes, num_rows_values): - - if len(text) == 0: - continue - - text = text.strip() - if num_rows != 1: - word_tokens = text.split() - num_tokens = len(word_tokens) - index_list = range(1, num_tokens + 1) - if num_tokens > num_rows: - index_list = random.sample(index_list, num_rows) - index_list.sort() - line_list = [] - start_idx = 0 - for index in index_list: - line_list.append( - " ".join(word_tokens - [start_idx: index] - ) - ) - start_idx = index - text = "\n".join(line_list) - - if 'ratio' not in bbox or bbox['ratio'] == 0 or bbox['ratio'] < 1e-4: - image4ratio = Image.new("RGB", (512, 512), "white") - draw = ImageDraw.Draw(image4ratio) - _, _ , w, h = draw.textbbox(xy=(0,0),text = text, font=font) - ratio = w / h - else: - ratio = bbox['ratio'] - - width = int(bbox['width'] * image_size[1]) - height = int(width / ratio) - top_left_x = int(bbox['top_left_x'] * image_size[0]) - top_left_y = int(bbox['top_left_y'] * image_size[1]) - yaw = bbox['yaw'] - - text_image = Image.new("RGB", (512, 512), "white") - draw = ImageDraw.Draw(text_image) - x,y,w,h = draw.textbbox(xy=(0,0),text = text, font=font) - text_image = Image.new("RGB", (w, h), "white") - draw = ImageDraw.Draw(text_image) - draw.text((-x/2,-y/2), text, "black", font=font, align=align) - text_image = resize_and_pad_image(text_image, (width, height)) - text_image = text_image.rotate(angle=-yaw, expand=True, fillcolor="white") - # image = Image.new("RGB", (w, h), "white") - # draw = ImageDraw.Draw(image) - - background.paste(text_image, (top_left_x, top_left_y)) - - return background diff --git a/spaces/Abhilashvj/planogram-compliance/classify/train.py b/spaces/Abhilashvj/planogram-compliance/classify/train.py deleted file mode 100644 index a9594203469bafd05604f684ac3e546d8a733926..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/classify/train.py +++ /dev/null @@ -1,537 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 classifier model on a classification dataset - -Usage - Single-GPU training: - $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 - -Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' -YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt -Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html -""" - -import argparse -import os -import subprocess -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import torch -import torch.distributed as dist -import torch.hub as hub -import torch.optim.lr_scheduler as lr_scheduler -import torchvision -from torch.cuda import amp -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from classify import val as validate -from models.experimental import attempt_load -from models.yolo import ClassificationModel, DetectionModel -from utils.dataloaders import create_classification_dataloader -from utils.general import ( - DATASETS_DIR, - LOGGER, - TQDM_BAR_FORMAT, - WorkingDirectory, - check_git_info, - check_git_status, - check_requirements, - colorstr, - download, - increment_path, - init_seeds, - print_args, - yaml_save, -) -from utils.loggers import GenericLogger -from utils.plots import imshow_cls -from utils.torch_utils import ( - ModelEMA, - model_info, - reshape_classifier_output, - select_device, - smart_DDP, - smart_optimizer, - smartCrossEntropyLoss, - torch_distributed_zero_first, -) - -LOCAL_RANK = int( - os.getenv("LOCAL_RANK", -1) -) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv("RANK", -1)) -WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) -GIT_INFO = check_git_info() - - -def train(opt, device): - init_seeds(opt.seed + 1 + RANK, deterministic=True) - save_dir, data, bs, epochs, nw, imgsz, pretrained = ( - opt.save_dir, - Path(opt.data), - opt.batch_size, - opt.epochs, - min(os.cpu_count() - 1, opt.workers), - opt.imgsz, - str(opt.pretrained).lower() == "true", - ) - cuda = device.type != "cpu" - - # Directories - wdir = save_dir / "weights" - wdir.mkdir(parents=True, exist_ok=True) # make dir - last, best = wdir / "last.pt", wdir / "best.pt" - - # Save run settings - yaml_save(save_dir / "opt.yaml", vars(opt)) - - # Logger - logger = ( - GenericLogger(opt=opt, console_logger=LOGGER) - if RANK in {-1, 0} - else None - ) - - # Download Dataset - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - data_dir = data if data.is_dir() else (DATASETS_DIR / data) - if not data_dir.is_dir(): - LOGGER.info( - f"\nDataset not found ⚠️, missing path {data_dir}, attempting download..." 
- ) - t = time.time() - if str(data) == "imagenet": - subprocess.run( - f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", - shell=True, - check=True, - ) - else: - url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip" - download(url, dir=data_dir.parent) - s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" - LOGGER.info(s) - - # Dataloaders - nc = len( - [x for x in (data_dir / "train").glob("*") if x.is_dir()] - ) # number of classes - trainloader = create_classification_dataloader( - path=data_dir / "train", - imgsz=imgsz, - batch_size=bs // WORLD_SIZE, - augment=True, - cache=opt.cache, - rank=LOCAL_RANK, - workers=nw, - ) - - test_dir = ( - data_dir / "test" if (data_dir / "test").exists() else data_dir / "val" - ) # data/test or data/val - if RANK in {-1, 0}: - testloader = create_classification_dataloader( - path=test_dir, - imgsz=imgsz, - batch_size=bs // WORLD_SIZE * 2, - augment=False, - cache=opt.cache, - rank=-1, - workers=nw, - ) - - # Model - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - if Path(opt.model).is_file() or opt.model.endswith(".pt"): - model = attempt_load(opt.model, device="cpu", fuse=False) - elif ( - opt.model in torchvision.models.__dict__ - ): # TorchVision models i.e. resnet50, efficientnet_b0 - model = torchvision.models.__dict__[opt.model]( - weights="IMAGENET1K_V1" if pretrained else None - ) - else: - m = hub.list( - "ultralytics/yolov5" - ) # + hub.list('pytorch/vision') # models - raise ModuleNotFoundError( - f"--model {opt.model} not found. Available models are: \n" - + "\n".join(m) - ) - if isinstance(model, DetectionModel): - LOGGER.warning( - "WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'" - ) - model = ClassificationModel( - model=model, nc=nc, cutoff=opt.cutoff or 10 - ) # convert to classification model - reshape_classifier_output(model, nc) # update class count - for m in model.modules(): - if not pretrained and hasattr(m, "reset_parameters"): - m.reset_parameters() - if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: - m.p = opt.dropout # set dropout - for p in model.parameters(): - p.requires_grad = True # for training - model = model.to(device) - - # Info - if RANK in {-1, 0}: - model.names = trainloader.dataset.classes # attach class names - model.transforms = ( - testloader.dataset.torch_transforms - ) # attach inference transforms - model_info(model) - if opt.verbose: - LOGGER.info(model) - images, labels = next(iter(trainloader)) - file = imshow_cls( - images[:25], - labels[:25], - names=model.names, - f=save_dir / "train_images.jpg", - ) - logger.log_images(file, name="Train Examples") - logger.log_graph(model, imgsz) # log model - - # Optimizer - optimizer = smart_optimizer( - model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay - ) - - # Scheduler - lrf = 0.01 # final lr (fraction of lr0) - # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine - lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, - # final_div_factor=1 / 25 / lrf) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Train - t0 = time.time() - criterion = smartCrossEntropyLoss( - label_smoothing=opt.label_smoothing - ) # loss 
function - best_fitness = 0.0 - scaler = amp.GradScaler(enabled=cuda) - val = test_dir.stem # 'val' or 'test' - LOGGER.info( - f"Image sizes {imgsz} train, {imgsz} test\n" - f"Using {nw * WORLD_SIZE} dataloader workers\n" - f"Logging results to {colorstr('bold', save_dir)}\n" - f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n" - f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}" - ) - for epoch in range(epochs): # loop over the dataset multiple times - tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness - model.train() - if RANK != -1: - trainloader.sampler.set_epoch(epoch) - pbar = enumerate(trainloader) - if RANK in {-1, 0}: - pbar = tqdm( - enumerate(trainloader), - total=len(trainloader), - bar_format=TQDM_BAR_FORMAT, - ) - for i, (images, labels) in pbar: # progress bar - images, labels = images.to(device, non_blocking=True), labels.to( - device - ) - - # Forward - with amp.autocast(enabled=cuda): # stability issues when enabled - loss = criterion(model(images), labels) - - # Backward - scaler.scale(loss).backward() - - # Optimize - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_( - model.parameters(), max_norm=10.0 - ) # clip gradients - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - - if RANK in {-1, 0}: - # Print - tloss = (tloss * i + loss.item()) / ( - i + 1 - ) # update mean losses - mem = "%.3gG" % ( - torch.cuda.memory_reserved() / 1e9 - if torch.cuda.is_available() - else 0 - ) # (GB) - pbar.desc = ( - f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" - + " " * 36 - ) - - # Test - if i == len(pbar) - 1: # last batch - top1, top5, vloss = validate.run( - model=ema.ema, - dataloader=testloader, - criterion=criterion, - pbar=pbar, - ) # test accuracy, loss - fitness = top1 # define fitness as top1 accuracy - - # Scheduler - scheduler.step() - - # Log metrics - if RANK in {-1, 0}: - # Best fitness - if fitness > best_fitness: - best_fitness = fitness - - # Log - metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]["lr"], - } # learning rate - logger.log_metrics(metrics, epoch) - - # Save model - final_epoch = epoch + 1 == epochs - if (not opt.nosave) or final_epoch: - ckpt = { - "epoch": epoch, - "best_fitness": best_fitness, - "model": deepcopy( - ema.ema - ).half(), # deepcopy(de_parallel(model)).half(), - "ema": None, # deepcopy(ema.ema).half(), - "updates": ema.updates, - "optimizer": None, # optimizer.state_dict(), - "opt": vars(opt), - "git": GIT_INFO, # {remote, branch, commit} if a git repo - "date": datetime.now().isoformat(), - } - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fitness: - torch.save(ckpt, best) - del ckpt - - # Train complete - if RANK in {-1, 0} and final_epoch: - LOGGER.info( - f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)" - f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n" - ) - - # Plot examples - images, labels = ( - x[:25] for x in next(iter(testloader)) - ) # 
first 25 images and labels - pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls( - images, - labels, - pred, - model.names, - verbose=False, - f=save_dir / "test_images.jpg", - ) - - # Log results - meta = { - "epochs": epochs, - "top1_acc": best_fitness, - "date": datetime.now().isoformat(), - } - logger.log_images( - file, name="Test Examples (true-predicted)", epoch=epoch - ) - logger.log_model(best, epochs, metadata=meta) - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument( - "--model", - type=str, - default="yolov5s-cls.pt", - help="initial weights path", - ) - parser.add_argument( - "--data", - type=str, - default="imagenette160", - help="cifar10, cifar100, mnist, imagenet, ...", - ) - parser.add_argument( - "--epochs", type=int, default=10, help="total training epochs" - ) - parser.add_argument( - "--batch-size", - type=int, - default=64, - help="total batch size for all GPUs", - ) - parser.add_argument( - "--imgsz", - "--img", - "--img-size", - type=int, - default=224, - help="train, val image size (pixels)", - ) - parser.add_argument( - "--nosave", action="store_true", help="only save final checkpoint" - ) - parser.add_argument( - "--cache", - type=str, - nargs="?", - const="ram", - help='--cache images in "ram" (default) or "disk"', - ) - parser.add_argument( - "--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu" - ) - parser.add_argument( - "--workers", - type=int, - default=8, - help="max dataloader workers (per RANK in DDP mode)", - ) - parser.add_argument( - "--project", - default=ROOT / "runs/train-cls", - help="save to project/name", - ) - parser.add_argument("--name", default="exp", help="save to project/name") - parser.add_argument( - "--exist-ok", - action="store_true", - help="existing project/name ok, do not increment", - ) - parser.add_argument( - "--pretrained", - nargs="?", - const=True, - default=True, - help="start from i.e. 
--pretrained False", - ) - parser.add_argument( - "--optimizer", - choices=["SGD", "Adam", "AdamW", "RMSProp"], - default="Adam", - help="optimizer", - ) - parser.add_argument( - "--lr0", type=float, default=0.001, help="initial learning rate" - ) - parser.add_argument( - "--decay", type=float, default=5e-5, help="weight decay" - ) - parser.add_argument( - "--label-smoothing", - type=float, - default=0.1, - help="Label smoothing epsilon", - ) - parser.add_argument( - "--cutoff", - type=int, - default=None, - help="Model layer cutoff index for Classify() head", - ) - parser.add_argument( - "--dropout", type=float, default=None, help="Dropout (fraction)" - ) - parser.add_argument("--verbose", action="store_true", help="Verbose mode") - parser.add_argument( - "--seed", type=int, default=0, help="Global training seed" - ) - parser.add_argument( - "--local_rank", - type=int, - default=-1, - help="Automatic DDP Multi-GPU argument, do not modify", - ) - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - assert ( - opt.batch_size != -1 - ), "AutoBatch is coming soon for classification, please pass a valid --batch-size" - assert ( - opt.batch_size % WORLD_SIZE == 0 - ), f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE" - assert ( - torch.cuda.device_count() > LOCAL_RANK - ), "insufficient CUDA devices for DDP command" - torch.cuda.set_device(LOCAL_RANK) - device = torch.device("cuda", LOCAL_RANK) - dist.init_process_group( - backend="nccl" if dist.is_nccl_available() else "gloo" - ) - - # Parameters - opt.save_dir = increment_path( - Path(opt.project) / opt.name, exist_ok=opt.exist_ok - ) # increment run - - # Train - train(opt, device) - - -def run(**kwargs): - # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py deleted file mode 100644 index 794274f26a417b41ba487bcd113741c0bc61072e..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Equing.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import annotations - -import json -from abc import ABC, abstractmethod - -import requests - -from ...typing import Any, CreateResult -from ..base_provider import BaseProvider - - -class Equing(BaseProvider): - url: str = 'https://next.eqing.tech/' - working = False - supports_stream = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - 'authority' : 'next.eqing.tech', - 'accept' : 'text/event-stream', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'content-type' : 'application/json', - 'origin' : 'https://next.eqing.tech', - 'plugins' : '0', - 'pragma' : 'no-cache', - 'referer' : 'https://next.eqing.tech/', - 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', - 
'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', - 'usesearch' : 'false', - 'x-requested-with' : 'XMLHttpRequest' - } - - json_data = { - 'messages' : messages, - 'stream' : stream, - 'model' : model, - 'temperature' : kwargs.get('temperature', 0.5), - 'presence_penalty' : kwargs.get('presence_penalty', 0), - 'frequency_penalty' : kwargs.get('frequency_penalty', 0), - 'top_p' : kwargs.get('top_p', 1), - } - - response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions', - headers=headers, json=json_data, stream=stream) - - if not stream: - yield response.json()["choices"][0]["message"]["content"] - return - - for line in response.iter_content(chunk_size=1024): - if line: - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - token = line_json['choices'][0]['delta'].get('content') - if token: - yield token - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py b/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py deleted file mode 100644 index 735d7aa0ebe7dbf3c4b062ebc3858cb5f9ebab40..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/models/experimental.py +++ /dev/null @@ -1,272 +0,0 @@ -import numpy as np -import random -import torch -import torch.nn as nn - -from models.common import Conv, DWConv -from utils.google_utils import attempt_download - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super(CrossConv, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class Sum(nn.Module): - # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 - def __init__(self, n, weight=False): # n: number of inputs - super(Sum, self).__init__() - self.weight = weight # apply weights boolean - self.iter = range(n - 1) # iter object - if weight: - self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights - - def forward(self, x): - y = x[0] # no weight - if self.weight: - w = torch.sigmoid(self.w) * 2 - for i in self.iter: - y = y + x[i + 1] * w[i] - else: - for i in self.iter: - y = y + x[i + 1] - return y - - -class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super(MixConv2d, self).__init__() - groups = len(k) - if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ 
= np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) - - def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) - - -class Ensemble(nn.ModuleList): - # Ensemble of models - def __init__(self): - super(Ensemble, self).__init__() - - def forward(self, x, augment=False): - y = [] - for module in self: - y.append(module(x, augment)[0]) - # y = torch.stack(y).max(0)[0] # max ensemble - # y = torch.stack(y).mean(0) # mean ensemble - y = torch.cat(y, 1) # nms ensemble - return y, None # inference, train output - - - - - -class ORT_NMS(torch.autograd.Function): - '''ONNX-Runtime NMS operation''' - @staticmethod - def forward(ctx, - boxes, - scores, - max_output_boxes_per_class=torch.tensor([100]), - iou_threshold=torch.tensor([0.45]), - score_threshold=torch.tensor([0.25])): - device = boxes.device - batch = scores.shape[0] - num_det = random.randint(0, 100) - batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device) - idxs = torch.arange(100, 100 + num_det).to(device) - zeros = torch.zeros((num_det,), dtype=torch.int64).to(device) - selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous() - selected_indices = selected_indices.to(torch.int64) - return selected_indices - - @staticmethod - def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): - return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - - -class TRT_NMS(torch.autograd.Function): - '''TensorRT NMS operation''' - @staticmethod - def forward( - ctx, - boxes, - scores, - background_class=-1, - box_coding=1, - iou_threshold=0.45, - max_output_boxes=100, - plugin_version="1", - score_activation=0, - score_threshold=0.25, - ): - batch_size, num_boxes, num_classes = scores.shape - num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32) - det_boxes = torch.randn(batch_size, max_output_boxes, 4) - det_scores = torch.randn(batch_size, max_output_boxes) - det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) - return num_det, det_boxes, det_scores, det_classes - - @staticmethod - def symbolic(g, - boxes, - scores, - background_class=-1, - box_coding=1, - iou_threshold=0.45, - max_output_boxes=100, - plugin_version="1", - score_activation=0, - score_threshold=0.25): - out = g.op("TRT::EfficientNMS_TRT", - boxes, - scores, - background_class_i=background_class, - box_coding_i=box_coding, - iou_threshold_f=iou_threshold, - max_output_boxes_i=max_output_boxes, - plugin_version_s=plugin_version, - score_activation_i=score_activation, - score_threshold_f=score_threshold, - outputs=4) - nums, boxes, scores, classes = out - return nums, boxes, scores, classes - - -class ONNX_ORT(nn.Module): - '''onnx module with ONNX-Runtime NMS operation.''' - def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80): - super().__init__() - self.device = device if device else torch.device("cpu") - self.max_obj = torch.tensor([max_obj]).to(device) - self.iou_threshold = torch.tensor([iou_thres]).to(device) - self.score_threshold = torch.tensor([score_thres]).to(device) - self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic - self.convert_matrix = 
torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], - dtype=torch.float32, - device=self.device) - self.n_classes=n_classes - - def forward(self, x): - boxes = x[:, :, :4] - conf = x[:, :, 4:5] - scores = x[:, :, 5:] - if self.n_classes == 1: - scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, - # so there is no need to multiplicate. - else: - scores *= conf # conf = obj_conf * cls_conf - boxes @= self.convert_matrix - max_score, category_id = scores.max(2, keepdim=True) - dis = category_id.float() * self.max_wh - nmsbox = boxes + dis - max_score_tp = max_score.transpose(1, 2).contiguous() - selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold) - X, Y = selected_indices[:, 0], selected_indices[:, 2] - selected_boxes = boxes[X, Y, :] - selected_categories = category_id[X, Y, :].float() - selected_scores = max_score[X, Y, :] - X = X.unsqueeze(1).float() - return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1) - -class ONNX_TRT(nn.Module): - '''onnx module with TensorRT NMS operation.''' - def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80): - super().__init__() - assert max_wh is None - self.device = device if device else torch.device('cpu') - self.background_class = -1, - self.box_coding = 1, - self.iou_threshold = iou_thres - self.max_obj = max_obj - self.plugin_version = '1' - self.score_activation = 0 - self.score_threshold = score_thres - self.n_classes=n_classes - - def forward(self, x): - boxes = x[:, :, :4] - conf = x[:, :, 4:5] - scores = x[:, :, 5:] - if self.n_classes == 1: - scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, - # so there is no need to multiplicate. 
- else: - scores *= conf # conf = obj_conf * cls_conf - num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding, - self.iou_threshold, self.max_obj, - self.plugin_version, self.score_activation, - self.score_threshold) - return num_det, det_boxes, det_scores, det_classes - - -class End2End(nn.Module): - '''export onnx or tensorrt model with NMS operation.''' - def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80): - super().__init__() - device = device if device else torch.device('cpu') - assert isinstance(max_wh,(int)) or max_wh is None - self.model = model.to(device) - self.model.model[-1].end2end = True - self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT - self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes) - self.end2end.eval() - - def forward(self, x): - x = self.model(x) - x = self.end2end(x) - return x - - - - - -def attempt_load(weights, map_location=None): - # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a - model = Ensemble() - for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model - - # Compatibility updates - for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: - m.inplace = True # pytorch 1.7.0 compatibility - elif type(m) is nn.Upsample: - m.recompute_scale_factor = None # torch 1.11.0 compatibility - elif type(m) is Conv: - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - - if len(model) == 1: - return model[-1] # return model - else: - print('Ensemble created with %s\n' % weights) - for k in ['names', 'stride']: - setattr(model, k, getattr(model[-1], k)) - return model # return ensemble - - diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/AlexWang/lama/bin/predict_inner_features.py b/spaces/AlexWang/lama/bin/predict_inner_features.py deleted file mode 100644 index 4f9f7a11a6c4757a4eaa05cf1ac648d372f7e02f..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/bin/predict_inner_features.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 - -# Example command: -# ./bin/predict.py \ -# model.path=<path to checkpoint, prepared by make_checkpoint.py> \ -# indir=<path to input data> \ -# outdir=<where to store predicts> - -import logging -import os -import sys -import traceback - -from saicinpainting.evaluation.utils import move_to_device - -os.environ['OMP_NUM_THREADS'] = '1' -os.environ['OPENBLAS_NUM_THREADS'] = '1' -os.environ['MKL_NUM_THREADS'] = '1' -os.environ['VECLIB_MAXIMUM_THREADS'] = '1' -os.environ['NUMEXPR_NUM_THREADS'] = '1' - -import cv2 -import hydra -import numpy as np -import torch -import tqdm -import yaml -from omegaconf import OmegaConf -from torch.utils.data._utils.collate import default_collate - -from saicinpainting.training.data.datasets import make_default_val_dataset -from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule -from saicinpainting.utils import register_debug_signal_handlers, get_shape - -LOGGER = logging.getLogger(__name__) - - -@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml') -def main(predict_config: OmegaConf): - try: - register_debug_signal_handlers() # kill -10 will result in traceback dumped into log - - device = torch.device(predict_config.device) - - train_config_path = os.path.join(predict_config.model.path, 'config.yaml') - with open(train_config_path, 'r') as f: - train_config = OmegaConf.create(yaml.safe_load(f)) - - checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint) - model = load_checkpoint(train_config, checkpoint_path, strict=False) - model.freeze() - model.to(device) - - assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported' - assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential) - - if not predict_config.indir.endswith('/'): - predict_config.indir += '/' - - dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) - - max_level = max(predict_config.levels) - - with torch.no_grad(): - for img_i in tqdm.trange(len(dataset)): - mask_fname = dataset.mask_filenames[img_i] - cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0]) - os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) - - batch = move_to_device(default_collate([dataset[img_i]]), device) - - img = batch['image'] - mask = batch['mask'] - mask[:] = 0 - mask_h, mask_w = mask.shape[-2:] - mask[:, :, - mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius, - mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1 - - masked_img = torch.cat([img * (1 - mask), mask], dim=1) - - feats = masked_img - for level_i, level in enumerate(model.generator.model): - feats = level(feats) - if level_i in predict_config.levels: - cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \ - if isinstance(feats, tuple) else feats - - if predict_config.slice_channels: - cur_feats = cur_feats[:, slice(*predict_config.slice_channels)] - - cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone() - cur_feat -= cur_feat.min() - cur_feat /= cur_feat.std() - cur_feat = 
cur_feat.clamp(0, 1) / 1 - cur_feat = cur_feat.cpu().numpy()[0] - cur_feat *= 255 - cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat) - - # for channel_i in predict_config.channels: - # - # cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy() - # cur_feat -= cur_feat.min() - # cur_feat /= cur_feat.max() - # cur_feat *= 255 - # cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - # cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat) - elif level_i >= max_level: - break - except KeyboardInterrupt: - LOGGER.warning('Interrupted by user') - except Exception as ex: - LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/spaces/Ame42/rwms/local_utils.py b/spaces/Ame42/rwms/local_utils.py deleted file mode 100644 index 4de37df85c5c9aae3a1d8f786631e40cee878e5e..0000000000000000000000000000000000000000 --- a/spaces/Ame42/rwms/local_utils.py +++ /dev/null @@ -1,344 +0,0 @@ -import math -import re -import numpy -import pandas -from sklearn.ensemble import RandomForestRegressor -from sklearn.tree import export_graphviz -import pickle as pkl - -l2 = "2L" -l1 = "1L" -s2 = "2S" -s1 = "1S" -date_time_col = "Date Time (GMT+01)" -time_col = "Time (GMT+01)" -dur_col = "Daylight duration (SEC)" -date_col = "Date" -id_col = "id" -well_col = "Well index" -blind_col = "THP BLIND (PSI)" -temp_col = "TEMP (°F)" -flp_col = "FLP (PSI)" -ro_col = "THP R/O (PSI)" -man_col = "Manifold Pressure (PSI)" -sim_col = f'Predicted {ro_col}' -ql_col = 'Liquid production (BBL/D)' -out_folder = "output/" -well_key = "wellhead" -flow_key = "flowstation" - -model_file = "rf-AWNW" -scaler_file = "ss-AWNW" - -day_mode = '22-11-2020' -all_mode = 'All' -train_mode = 'Train' -test_mode = 'Test' - - -def round_to_n(x, n): - x = x if x % 10 != 5 else x + 1 - n = n if x > 9 else n - 1 - return x if x == 0 else round(x, -int(math.floor(math.log10(abs(x)))) + (n - 1)) - - -def to_sec(h, m, s): - return (int(h) * 60 * 60) + (int(m) * 60) + int(s) - - -def from_sec(t): - return f"{t // (60 * 60):0>2}:{(t % (60 * 60)) // 60:0>2}:{(t % (60 * 60)) % 60:0>2}" - - -def column_matcher(title): - if re.search("#", string=title) is not None: - found = id_col - elif re.search(".*(Date|DATE).*(Time|TIME).*GMT.*", string=title) is not None: - found = date_time_col - elif re.search("THP.*R/O.*(PSI|units)", string=title) is not None: - found = ro_col - elif re.search(".*TEMP.*(F|units)", string=title) is not None: - found = temp_col - elif re.search(".*FLP.*(PSI|units)", string=title) is not None: - found = flp_col - elif re.search("THP.*BLIND.*(PSI|units)", string=title) is not None: - found = blind_col - elif re.search("THP.*(PSI|units)", string=title) is not None: - found = blind_col - elif re.search(".*1S.*PSI.*", string=title) is not None: - found = s1 - elif re.search(".*2S.*PSI.*", string=title) is not None: - found = s2 - elif re.search(".*1L.*PSI.*", string=title) is not None: - found = l1 - elif re.search(".*2L.*PSI.*", string=title) is not None: - found = l2 - else: - found = False - - return found - - -def file_matcher(name: str): - if re.search("\\d+-\\d+-\\d+.*flow.*man.*", string=name.lower()) is not None: - flowstation = True - else: - flowstation = False - - return flowstation - - -def file_matcher2(name: str): - if re.search(".*1s.*", string=name.lower()) is not None: - well = s1 - elif re.search(".*1l.*", string=name.lower()) is not None: - 
well = l1 - elif re.search(".*2s.*", string=name.lower()) is not None: - well = s2 - else: - well = l2 - - return well - - -def restructure(data, count, duration, times, dates): - for datetime in data[date_time_col]: - try: - date_time = re.sub("\\.0(?=\\s)", "", datetime) - datetime_array = date_time.split() - date = datetime_array[0].split("/") - - time_array = datetime_array[1].split(":") - - if datetime_array[2] == "PM" and time_array[0] != "12": - hour = int(time_array[0]) + 12 - elif datetime_array[2] == "AM" and time_array[0] == "12": - hour = int(time_array[0]) - 12 - else: - hour = time_array[0] - - minutes = time_array[1] - sec = round_to_n(int(time_array[2]), 1) - - if sec == 60: - sec = "00" - minutes = int(minutes) + 1 - - if minutes == 60: - minutes = "00" - hour = int(hour) + 1 - - if hour == 24: - hour = "00" - date[1] = int(date[1]) + 1 - - duration.append(to_sec(hour, minutes, sec)) - times.append(f"{hour}:{minutes}:{sec}") - dates.append(f"{date[1]}/{date[0]}/{date[2]}") - date_time = f"{date[1]}/{date[0]}/{date[2]} {datetime_array[1]} {datetime_array[2]}" - - data.loc[count, date_time_col] = date_time - count += 1 - except IndexError: - print(f"\n\n{datetime}", flush=True) - raise - - data.insert(1, dur_col, numpy.array(duration), True) - data.insert(2, time_col, numpy.array(times), True) - data.insert(3, date_col, numpy.array(dates), True) - return data.drop(axis=1, columns="index", errors='ignore') - - -def try_key(temp, key): - try: - temp[f"{key}"] - except KeyError: - temp[f"{key}"] = dict() - - -def find_data(index, wlhd): - for w in wlhd: - if index == w[0]: - return w[1] - - return None - - -def split_join(flowstation: pandas.DataFrame, wellhead: pandas.DataFrame, offset): - joined = [] - info = [s1, l1, s2, l2] - for i, o in zip(info, offset): - # print(f'\n\nNow working on {i} column\n') - data = flowstation.drop(flowstation.columns.difference([i, 'Daylight duration (SEC)']), - axis=1) - data.rename(columns={i: man_col}, inplace=True) - data.insert(2, well_col, [i for _ in range(data.shape[0])], True) - - # print(f"{data.shape[0]} rows before drop and merge") - data_well = find_data(i, wellhead) - if data_well is not None: - data_well.drop_duplicates(inplace=True, subset=[time_col]) - data = data.merge(data_well, how='inner', on=[dur_col]) - - # print(f"{data.shape[0]} rows after drop and merge") - # offset the rows by the required amount 'o' - data_y = data.drop(data.columns.difference([ro_col, id_col]), axis=1, errors="ignore").iloc[o:] - data_x = data.drop(columns=[ro_col], axis=1, errors="ignore").iloc[:(data.shape[0] - 1 - o)] - data_y.reset_index(inplace=True) - data_x.reset_index(inplace=True) - data_y.drop(columns=["index"], axis=1, inplace=True) - data_x.drop(columns=["index"], axis=1, inplace=True) - data = data_y.merge(data_x, how='inner', on=[id_col]) - joined.append((i, data)) - - return joined - - -class WellDataPoint: - - def __init__(self, thp, day_sec, man_pres, temp, _l1=0, _s1=1, _l2=0, _s2=0): - self.thp = thp - self.day_sec = day_sec - self.man_pres = man_pres - self.temp = temp - self.l1 = _l1 - self.s1 = _s1 - self.l2 = _l2 - self.s2 = _s2 - - def __str__(self): - day_sec, deli, i, man_pres, temp, well, well_titles = self.fields() - return f"""\033[1;31mTesting data\033[0m -{day_sec:>20}{deli:3}{self.day_sec} seconds -{man_pres:>20}{deli:3}{self.man_pres} psi -{temp:>20}{deli:3}{self.temp} °F -{well:>20}{deli:3}{well_titles[i]} -""" - - def fields(self): - deli = ' ' - day_sec = "Day duration:" - man_pres = "Manifold Pressure:" - temp = 
"Temperature:" - well = "Well Name:" - wells = [self.l1, self.l2, self.s1, self.s2] - well_titles = ["Awoba NW 1L", "Awoba NW 2L", "Awoba NW 1S", "Awoba NW 2S"] # List of well titles - i = 0 - # Find the well with dummy value 1 - while not (wells[i]): # not(0) yields true and not(anything else) yields false - i += 1 - return day_sec, deli, i, man_pres, temp, well, well_titles - - def __plain__(self): - day_sec, deli, i, man_pres, temp, well, well_titles = self.fields() - space = '40' - d_space = '3' - return f"""Testing data -{day_sec:>{space}}{deli:{d_space}}{self.day_sec} seconds -{man_pres:>{space}}{deli:{d_space}}{self.man_pres} psi -{temp:>{space}}{deli:{d_space}}{self.temp} °F -{well:>{space}}{deli:{d_space}}{well_titles[i]} -""" - - def __repr__(self): - return f"Practice([{self.day_sec}, {self.man_pres}, {self.temp}, {self.l1}, {self.s1}, {self.l2}, {self.s2}])" - - def get_x(self): - return [self.day_sec, self.man_pres, self.temp, self.l1, self.s1, self.l2, self.s2] - - def get_y(self): - return self.thp - - -def oversample_balance(data: pandas.DataFrame): - # get buckets for control column - data = data.astype(float, errors='ignore') - mx = data[ro_col].max(axis=0, skipna=True) - mn = data[ro_col].min(axis=0, skipna=True) - rng = mx - mn - bucket = rng / 10 - - # shuffle data into buckets - max_count = 0 - counter = mn - temp = [] - results = [] - - while counter < mx: - - sub_data = data[data[ro_col].between(counter, counter + bucket, inclusive='right')] - if sub_data.shape[0] > 0 and float(sub_data[ro_col].min(axis=0, skipna=True)) > 0: - temp.append(sub_data) - - max_count = max_count if sub_data.shape[0] < max_count else sub_data.shape[0] - - counter += bucket - - for r in temp: - counter = 0 - pumped_data = r - print(r.shape, "\n", r.head()) - # add elements of r to pumped_data - while pumped_data.shape[0] < max_count: - new_row = r.iloc[[counter % r.shape[0]]] - - pumped_data = pandas.concat([pumped_data, new_row], ignore_index=True) - - # add final results to results series - results.append(pumped_data) - - return pandas.concat(results, ignore_index=True) - - -def parse_well_id(well_id): - return f"Awoba NW {well_id}" - - -def parse_well_id_2(well_id): - return f"Abura {well_id}" - - -def print_graph(model: RandomForestRegressor, x): - for est, idx in zip(model.estimators_, len(model.estimators_)): - file = f'tree_{idx}.dot' - export_graphviz(model, out_file=file, feature_names=x.columns, - class_names=['extreme', 'moderate', 'vulnerable', 'non-vulnerable'], - rounded=True, proportion=False, precision=4, filled=True) - - -def write_state_files(model, scaler): - pkl.dump(model, open(f"{model_file}.mdl", "wb")) - pkl.dump(scaler, open(f"{scaler_file}.sts", "wb")) - - -def keep_useful_cols(data, columns=None): - if columns is None: - columns = [ro_col, dur_col, man_col, well_col, time_col, date_col, blind_col, flp_col, temp_col] - return data.drop(data.columns.difference(columns), axis=1) - - -def read_state_files(mdl, scl): - mdl = pkl.load(open(f"{mdl}.mdl", "rb")) - scl = pkl.load(open(f"{scl}.sts", "rb")) - return mdl, scl - - -def change_well_to_dummy(wl): - _l1, _l2, _s1, _s2 = 0, 0, 0, 0 - - if wl == parse_well_id(l1): - _l1 = 1 - elif wl == parse_well_id(s1): - _s1 = 1 - elif wl == parse_well_id(l2): - _l2 = 1 - elif wl == parse_well_id(s2): - _s2 = 1 - - return _l1, _l2, _s1, _s2 - - -def calc_excel(pres): - # from well Abura 2S - return pres + 624, pres * 31.88 - diff --git a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h 
b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h deleted file mode 100644 index f88ab5d8cb343f97026966b402eaeed8831e356a..0000000000000000000000000000000000000000 --- a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/policy.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include <type_traits> - -#include "libipc/def.h" -#include "libipc/prod_cons.h" - -#include "libipc/circ/elem_array.h" - -namespace ipc { -namespace policy { - -template